Diffstat (limited to 'kernel/include/linux')
-rw-r--r-- kernel/include/linux/acpi.h | 260
-rw-r--r-- kernel/include/linux/acpi_irq.h | 10
-rw-r--r-- kernel/include/linux/aer.h | 5
-rw-r--r-- kernel/include/linux/alarmtimer.h | 4
-rw-r--r-- kernel/include/linux/amba/bus.h | 2
-rw-r--r-- kernel/include/linux/amba/sp810.h | 2
-rw-r--r-- kernel/include/linux/arcdevice.h | 342
-rw-r--r-- kernel/include/linux/asn1_ber_bytecode.h | 16
-rw-r--r-- kernel/include/linux/ata.h | 16
-rw-r--r-- kernel/include/linux/atmel_serial.h | 240
-rw-r--r-- kernel/include/linux/atmel_tc.h | 1
-rw-r--r-- kernel/include/linux/atomic.h | 459
-rw-r--r-- kernel/include/linux/audit.h | 12
-rw-r--r-- kernel/include/linux/average.h | 61
-rw-r--r-- kernel/include/linux/backing-dev-defs.h | 259
-rw-r--r-- kernel/include/linux/backing-dev.h | 510
-rw-r--r-- kernel/include/linux/backlight.h | 8
-rw-r--r-- kernel/include/linux/basic_mmio_gpio.h | 2
-rw-r--r-- kernel/include/linux/bcm47xx_nvram.h | 17
-rw-r--r-- kernel/include/linux/bcma/bcma.h | 11
-rw-r--r-- kernel/include/linux/bcma/bcma_driver_chipcommon.h | 1
-rw-r--r-- kernel/include/linux/bcma/bcma_driver_pci.h | 11
-rw-r--r-- kernel/include/linux/bio.h | 90
-rw-r--r-- kernel/include/linux/bitmap.h | 2
-rw-r--r-- kernel/include/linux/bitops.h | 21
-rw-r--r-- kernel/include/linux/blk-cgroup.h | 786
-rw-r--r-- kernel/include/linux/blk-mq.h | 18
-rw-r--r-- kernel/include/linux/blk_types.h | 56
-rw-r--r-- kernel/include/linux/blkdev.h | 233
-rw-r--r-- kernel/include/linux/blkpg.h | 21
-rw-r--r-- kernel/include/linux/bootmem.h | 8
-rw-r--r-- kernel/include/linux/bottom_half.h | 33
-rw-r--r-- kernel/include/linux/bpf.h | 69
-rw-r--r-- kernel/include/linux/brcmphy.h | 36
-rw-r--r-- kernel/include/linux/buffer_head.h | 8
-rw-r--r-- kernel/include/linux/cacheinfo.h | 2
-rw-r--r-- kernel/include/linux/can/dev.h | 9
-rw-r--r-- kernel/include/linux/can/led.h | 1
-rw-r--r-- kernel/include/linux/ceph/ceph_features.h | 1
-rw-r--r-- kernel/include/linux/ceph/libceph.h | 27
-rw-r--r-- kernel/include/linux/ceph/messenger.h | 27
-rw-r--r-- kernel/include/linux/ceph/msgr.h | 4
-rw-r--r-- kernel/include/linux/ceph/osd_client.h | 2
-rw-r--r-- kernel/include/linux/cgroup-defs.h | 532
-rw-r--r-- kernel/include/linux/cgroup.h | 1090
-rw-r--r-- kernel/include/linux/cgroup_subsys.h | 30
-rw-r--r-- kernel/include/linux/clk-provider.h | 155
-rw-r--r-- kernel/include/linux/clk.h | 27
-rw-r--r-- kernel/include/linux/clk/at91_pmc.h | 34
-rw-r--r-- kernel/include/linux/clk/clk-conf.h | 2
-rw-r--r-- kernel/include/linux/clk/shmobile.h | 12
-rw-r--r-- kernel/include/linux/clk/tegra.h | 3
-rw-r--r-- kernel/include/linux/clk/ti.h | 160
-rw-r--r-- kernel/include/linux/clkdev.h | 11
-rw-r--r-- kernel/include/linux/clockchips.h | 65
-rw-r--r-- kernel/include/linux/clocksource.h | 14
-rw-r--r-- kernel/include/linux/cma.h | 2
-rw-r--r-- kernel/include/linux/com20020.h | 145
-rw-r--r-- kernel/include/linux/compaction.h | 3
-rw-r--r-- kernel/include/linux/compat.h | 2
-rw-r--r-- kernel/include/linux/compiler-gcc.h | 240
-rw-r--r-- kernel/include/linux/compiler-gcc3.h | 23
-rw-r--r-- kernel/include/linux/compiler-gcc4.h | 91
-rw-r--r-- kernel/include/linux/compiler-gcc5.h | 67
-rw-r--r-- kernel/include/linux/compiler.h | 104
-rw-r--r-- kernel/include/linux/completion.h | 8
-rw-r--r-- kernel/include/linux/configfs.h | 111
-rw-r--r-- kernel/include/linux/console.h | 2
-rw-r--r-- kernel/include/linux/console_struct.h | 1
-rw-r--r-- kernel/include/linux/context_tracking.h | 33
-rw-r--r-- kernel/include/linux/context_tracking_state.h | 2
-rw-r--r-- kernel/include/linux/coresight.h | 23
-rw-r--r-- kernel/include/linux/count_zeros.h | 57
-rw-r--r-- kernel/include/linux/cpu.h | 9
-rw-r--r-- kernel/include/linux/cpu_cooling.h | 39
-rw-r--r-- kernel/include/linux/cpufeature.h | 7
-rw-r--r-- kernel/include/linux/cpufreq.h | 43
-rw-r--r-- kernel/include/linux/cpuidle.h | 21
-rw-r--r-- kernel/include/linux/cpuset.h | 16
-rw-r--r-- kernel/include/linux/crc-itu-t.h | 2
-rw-r--r-- kernel/include/linux/crc-t10dif.h | 1
-rw-r--r-- kernel/include/linux/cred.h | 8
-rw-r--r-- kernel/include/linux/crush/crush.h | 40
-rw-r--r-- kernel/include/linux/crush/hash.h | 6
-rw-r--r-- kernel/include/linux/crush/mapper.h | 2
-rw-r--r-- kernel/include/linux/crypto.h | 535
-rw-r--r-- kernel/include/linux/cryptouser.h | 105
-rw-r--r-- kernel/include/linux/dax.h | 39
-rw-r--r-- kernel/include/linux/dcache.h | 14
-rw-r--r-- kernel/include/linux/dccp.h | 6
-rw-r--r-- kernel/include/linux/debugfs.h | 27
-rw-r--r-- kernel/include/linux/devfreq.h | 24
-rw-r--r-- kernel/include/linux/devfreq_cooling.h | 81
-rw-r--r-- kernel/include/linux/device-mapper.h | 10
-rw-r--r-- kernel/include/linux/device.h | 112
-rw-r--r-- kernel/include/linux/devpts_fs.h | 4
-rw-r--r-- kernel/include/linux/dma-buf.h | 10
-rw-r--r-- kernel/include/linux/dma-contiguous.h | 4
-rw-r--r-- kernel/include/linux/dma-iommu.h | 85
-rw-r--r-- kernel/include/linux/dma-mapping.h | 18
-rw-r--r-- kernel/include/linux/dma/hsu.h | 11
-rw-r--r-- kernel/include/linux/dma/pxa-dma.h | 27
-rw-r--r-- kernel/include/linux/dma_remapping.h | 8
-rw-r--r-- kernel/include/linux/dmaengine.h | 153
-rw-r--r-- kernel/include/linux/dmapool.h | 8
-rw-r--r-- kernel/include/linux/dmar.h | 85
-rw-r--r-- kernel/include/linux/dmi.h | 4
-rw-r--r-- kernel/include/linux/dns_resolver.h | 2
-rw-r--r-- kernel/include/linux/edac.h | 4
-rw-r--r-- kernel/include/linux/efi.h | 48
-rw-r--r-- kernel/include/linux/elevator.h | 2
-rw-r--r-- kernel/include/linux/enclosure.h | 4
-rw-r--r-- kernel/include/linux/etherdevice.h | 44
-rw-r--r-- kernel/include/linux/extcon.h | 151
-rw-r--r-- kernel/include/linux/extcon/extcon-adc-jack.h | 5
-rw-r--r-- kernel/include/linux/extcon/extcon-gpio.h | 24
-rw-r--r-- kernel/include/linux/f2fs_fs.h | 24
-rw-r--r-- kernel/include/linux/fault-inject.h | 2
-rw-r--r-- kernel/include/linux/fb.h | 9
-rw-r--r-- kernel/include/linux/fdtable.h | 9
-rw-r--r-- kernel/include/linux/fence.h | 25
-rw-r--r-- kernel/include/linux/filter.h | 112
-rw-r--r-- kernel/include/linux/fpga/fpga-mgr.h | 127
-rw-r--r-- kernel/include/linux/frontswap.h | 14
-rw-r--r-- kernel/include/linux/fs.h | 145
-rw-r--r-- kernel/include/linux/fscache-cache.h | 55
-rw-r--r-- kernel/include/linux/fsl/guts.h | 192
-rw-r--r-- kernel/include/linux/fsl_devices.h | 19
-rw-r--r-- kernel/include/linux/fsl_ifc.h | 50
-rw-r--r-- kernel/include/linux/fsnotify_backend.h | 61
-rw-r--r-- kernel/include/linux/ftrace.h | 24
-rw-r--r-- kernel/include/linux/fwnode.h | 2
-rw-r--r-- kernel/include/linux/genalloc.h | 10
-rw-r--r-- kernel/include/linux/genetlink.h | 2
-rw-r--r-- kernel/include/linux/genhd.h | 59
-rw-r--r-- kernel/include/linux/gfp.h | 324
-rw-r--r-- kernel/include/linux/goldfish.h | 19
-rw-r--r-- kernel/include/linux/gpio.h | 7
-rw-r--r-- kernel/include/linux/gpio/consumer.h | 126
-rw-r--r-- kernel/include/linux/gpio/driver.h | 53
-rw-r--r-- kernel/include/linux/gpio/machine.h | 1
-rw-r--r-- kernel/include/linux/gsmmux.h | 36
-rw-r--r-- kernel/include/linux/hardirq.h | 2
-rw-r--r-- kernel/include/linux/hid.h | 6
-rw-r--r-- kernel/include/linux/highmem.h | 1
-rw-r--r-- kernel/include/linux/hrtimer.h | 202
-rw-r--r-- kernel/include/linux/htirq.h | 22
-rw-r--r-- kernel/include/linux/huge_mm.h | 20
-rw-r--r-- kernel/include/linux/hugetlb.h | 49
-rw-r--r-- kernel/include/linux/hugetlb_cgroup.h | 8
-rw-r--r-- kernel/include/linux/hwspinlock.h | 7
-rw-r--r-- kernel/include/linux/hyperv.h | 74
-rw-r--r-- kernel/include/linux/i2c-ocores.h | 1
-rw-r--r-- kernel/include/linux/i2c.h | 19
-rw-r--r-- kernel/include/linux/i2c/i2c-rcar.h | 10
-rw-r--r-- kernel/include/linux/i2c/twl.h | 1
-rw-r--r-- kernel/include/linux/ide.h | 27
-rw-r--r-- kernel/include/linux/ieee80211.h | 87
-rw-r--r-- kernel/include/linux/ieee802154.h | 63
-rw-r--r-- kernel/include/linux/if_bridge.h | 6
-rw-r--r-- kernel/include/linux/if_link.h | 10
-rw-r--r-- kernel/include/linux/if_macvlan.h | 2
-rw-r--r-- kernel/include/linux/if_pppox.h | 2
-rw-r--r-- kernel/include/linux/if_vlan.h | 28
-rw-r--r-- kernel/include/linux/igmp.h | 4
-rw-r--r-- kernel/include/linux/iio/buffer.h | 3
-rw-r--r-- kernel/include/linux/iio/common/st_sensors.h | 6
-rw-r--r-- kernel/include/linux/iio/consumer.h | 2
-rw-r--r-- kernel/include/linux/iio/iio.h | 6
-rw-r--r-- kernel/include/linux/iio/sysfs.h | 3
-rw-r--r-- kernel/include/linux/iio/trigger.h | 3
-rw-r--r-- kernel/include/linux/iio/triggered_buffer.h | 4
-rw-r--r-- kernel/include/linux/iio/triggered_event.h | 11
-rw-r--r-- kernel/include/linux/iio/types.h | 2
-rw-r--r-- kernel/include/linux/inet_diag.h | 1
-rw-r--r-- kernel/include/linux/inetdevice.h | 13
-rw-r--r-- kernel/include/linux/init.h | 89
-rw-r--r-- kernel/include/linux/init_task.h | 24
-rw-r--r-- kernel/include/linux/input.h | 2
-rw-r--r-- kernel/include/linux/input/edt-ft5x06.h | 24
-rw-r--r-- kernel/include/linux/input/touchscreen.h | 10
-rw-r--r-- kernel/include/linux/intel-iommu.h | 154
-rw-r--r-- kernel/include/linux/intel-svm.h | 121
-rw-r--r-- kernel/include/linux/interrupt.h | 18
-rw-r--r-- kernel/include/linux/io-64-nonatomic-hi-lo.h | 32
-rw-r--r-- kernel/include/linux/io-64-nonatomic-lo-hi.h | 32
-rw-r--r-- kernel/include/linux/io-mapping.h | 2
-rw-r--r-- kernel/include/linux/io.h | 41
-rw-r--r-- kernel/include/linux/iommu-common.h | 1
-rw-r--r-- kernel/include/linux/iommu.h | 54
-rw-r--r-- kernel/include/linux/ioport.h | 1
-rw-r--r-- kernel/include/linux/iova.h | 4
-rw-r--r-- kernel/include/linux/ipmi_smi.h | 7
-rw-r--r-- kernel/include/linux/ipv6.h | 11
-rw-r--r-- kernel/include/linux/irq.h | 192
-rw-r--r-- kernel/include/linux/irq_work.h | 6
-rw-r--r-- kernel/include/linux/irqbypass.h | 90
-rw-r--r-- kernel/include/linux/irqchip.h | 31
-rw-r--r-- kernel/include/linux/irqchip/arm-gic-acpi.h | 31
-rw-r--r-- kernel/include/linux/irqchip/arm-gic-v3.h | 97
-rw-r--r-- kernel/include/linux/irqchip/arm-gic.h | 19
-rw-r--r-- kernel/include/linux/irqchip/ingenic.h | 23
-rw-r--r-- kernel/include/linux/irqchip/irq-sa11x0.h | 17
-rw-r--r-- kernel/include/linux/irqchip/mips-gic.h | 31
-rw-r--r-- kernel/include/linux/irqdesc.h | 83
-rw-r--r-- kernel/include/linux/irqdomain.h | 139
-rw-r--r-- kernel/include/linux/irqhandler.h | 2
-rw-r--r-- kernel/include/linux/irqnr.h | 6
-rw-r--r-- kernel/include/linux/irqreturn.h | 2
-rw-r--r-- kernel/include/linux/jbd.h | 1047
-rw-r--r-- kernel/include/linux/jbd2.h | 149
-rw-r--r-- kernel/include/linux/jbd_common.h | 70
-rw-r--r-- kernel/include/linux/jiffies.h | 145
-rw-r--r-- kernel/include/linux/jump_label.h | 269
-rw-r--r-- kernel/include/linux/kasan.h | 10
-rw-r--r-- kernel/include/linux/kdev_t.h | 9
-rw-r--r-- kernel/include/linux/kernel.h | 80
-rw-r--r-- kernel/include/linux/kernfs.h | 9
-rw-r--r-- kernel/include/linux/kexec.h | 18
-rw-r--r-- kernel/include/linux/key-type.h | 3
-rw-r--r-- kernel/include/linux/key.h | 33
-rw-r--r-- kernel/include/linux/klist.h | 1
-rw-r--r-- kernel/include/linux/kmemleak.h | 2
-rw-r--r-- kernel/include/linux/kmod.h | 2
-rw-r--r-- kernel/include/linux/kobject.h | 7
-rw-r--r-- kernel/include/linux/kprobes.h | 2
-rw-r--r-- kernel/include/linux/kref.h | 33
-rw-r--r-- kernel/include/linux/kthread.h | 3
-rw-r--r-- kernel/include/linux/kvm_host.h | 198
-rw-r--r-- kernel/include/linux/kvm_irqfd.h | 71
-rw-r--r-- kernel/include/linux/kvm_types.h | 1
-rw-r--r-- kernel/include/linux/leds.h | 32
-rw-r--r-- kernel/include/linux/lglock.h | 34
-rw-r--r-- kernel/include/linux/libata.h | 6
-rw-r--r-- kernel/include/linux/libfdt_env.h | 4
-rw-r--r-- kernel/include/linux/libnvdimm.h | 155
-rw-r--r-- kernel/include/linux/lightnvm.h | 441
-rw-r--r-- kernel/include/linux/list.h | 10
-rw-r--r-- kernel/include/linux/list_bl.h | 17
-rw-r--r-- kernel/include/linux/list_nulls.h | 3
-rw-r--r-- kernel/include/linux/livepatch.h | 8
-rw-r--r-- kernel/include/linux/llist.h | 2
-rw-r--r-- kernel/include/linux/locallock.h | 6
-rw-r--r-- kernel/include/linux/lockd/lockd.h | 10
-rw-r--r-- kernel/include/linux/lockdep.h | 16
-rw-r--r-- kernel/include/linux/lsm_audit.h | 7
-rw-r--r-- kernel/include/linux/lsm_hooks.h | 1890
-rw-r--r-- kernel/include/linux/mailbox_client.h | 2
-rw-r--r-- kernel/include/linux/mailbox_controller.h | 9
-rw-r--r-- kernel/include/linux/marvell_phy.h | 1
-rw-r--r-- kernel/include/linux/math64.h | 80
-rw-r--r-- kernel/include/linux/mbus.h | 5
-rw-r--r-- kernel/include/linux/mei_cl_bus.h | 85
-rw-r--r-- kernel/include/linux/memblock.h | 75
-rw-r--r-- kernel/include/linux/memcontrol.h | 563
-rw-r--r-- kernel/include/linux/memory_hotplug.h | 7
-rw-r--r-- kernel/include/linux/mfd/88pm80x.h | 163
-rw-r--r-- kernel/include/linux/mfd/arizona/core.h | 12
-rw-r--r-- kernel/include/linux/mfd/arizona/pdata.h | 28
-rw-r--r-- kernel/include/linux/mfd/arizona/registers.h | 368
-rw-r--r-- kernel/include/linux/mfd/axp20x.h | 189
-rw-r--r-- kernel/include/linux/mfd/core.h | 10
-rw-r--r-- kernel/include/linux/mfd/cros_ec.h | 87
-rw-r--r-- kernel/include/linux/mfd/cros_ec_commands.h | 277
-rw-r--r-- kernel/include/linux/mfd/da9052/reg.h | 3
-rw-r--r-- kernel/include/linux/mfd/da9055/core.h | 2
-rw-r--r-- kernel/include/linux/mfd/da9062/core.h | 50
-rw-r--r-- kernel/include/linux/mfd/da9062/registers.h | 1108
-rw-r--r-- kernel/include/linux/mfd/da9063/core.h | 1
-rw-r--r-- kernel/include/linux/mfd/da9063/pdata.h | 1
-rw-r--r-- kernel/include/linux/mfd/da9150/core.h | 19
-rw-r--r-- kernel/include/linux/mfd/intel_bxtwc.h | 69
-rw-r--r-- kernel/include/linux/mfd/intel_soc_pmic.h | 2
-rw-r--r-- kernel/include/linux/mfd/lpc_ich.h | 6
-rw-r--r-- kernel/include/linux/mfd/max77686.h | 5
-rw-r--r-- kernel/include/linux/mfd/max77693-common.h | 49
-rw-r--r-- kernel/include/linux/mfd/max77693-private.h | 134
-rw-r--r-- kernel/include/linux/mfd/max77843-private.h | 174
-rw-r--r-- kernel/include/linux/mfd/mt6397/core.h | 1
-rw-r--r-- kernel/include/linux/mfd/palmas.h | 7
-rw-r--r-- kernel/include/linux/mfd/rtsx_pci.h | 6
-rw-r--r-- kernel/include/linux/mfd/samsung/core.h | 4
-rw-r--r-- kernel/include/linux/mfd/samsung/s2mps11.h | 1
-rw-r--r-- kernel/include/linux/mfd/samsung/s2mps13.h | 1
-rw-r--r-- kernel/include/linux/mfd/stmpe.h | 44
-rw-r--r-- kernel/include/linux/mfd/syscon/atmel-mc.h | 144
-rw-r--r-- kernel/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 8
-rw-r--r-- kernel/include/linux/mfd/syscon/imx7-iomuxc-gpr.h | 47
-rw-r--r-- kernel/include/linux/mfd/tps6105x.h | 10
-rw-r--r-- kernel/include/linux/mic_bus.h | 3
-rw-r--r-- kernel/include/linux/microchipphy.h | 73
-rw-r--r-- kernel/include/linux/miscdevice.h | 3
-rw-r--r-- kernel/include/linux/mlx4/cmd.h | 6
-rw-r--r-- kernel/include/linux/mlx4/cq.h | 3
-rw-r--r-- kernel/include/linux/mlx4/device.h | 52
-rw-r--r-- kernel/include/linux/mlx4/driver.h | 1
-rw-r--r-- kernel/include/linux/mlx4/qp.h | 27
-rw-r--r-- kernel/include/linux/mlx5/cq.h | 5
-rw-r--r-- kernel/include/linux/mlx5/device.h | 230
-rw-r--r-- kernel/include/linux/mlx5/driver.h | 238
-rw-r--r-- kernel/include/linux/mlx5/flow_table.h | 54
-rw-r--r-- kernel/include/linux/mlx5/mlx5_ifc.h | 6602
-rw-r--r-- kernel/include/linux/mlx5/qp.h | 25
-rw-r--r-- kernel/include/linux/mlx5/vport.h | 55
-rw-r--r-- kernel/include/linux/mm-arch-hooks.h | 25
-rw-r--r-- kernel/include/linux/mm.h | 269
-rw-r--r-- kernel/include/linux/mm_types.h | 75
-rw-r--r-- kernel/include/linux/mmc/card.h | 6
-rw-r--r-- kernel/include/linux/mmc/core.h | 5
-rw-r--r-- kernel/include/linux/mmc/dw_mmc.h | 38
-rw-r--r-- kernel/include/linux/mmc/host.h | 70
-rw-r--r-- kernel/include/linux/mmc/mmc.h | 4
-rw-r--r-- kernel/include/linux/mmc/sdhci-pci-data.h | 2
-rw-r--r-- kernel/include/linux/mmdebug.h | 1
-rw-r--r-- kernel/include/linux/mmiotrace.h | 2
-rw-r--r-- kernel/include/linux/mmu_notifier.h | 58
-rw-r--r-- kernel/include/linux/mmzone.h | 158
-rw-r--r-- kernel/include/linux/mod_devicetable.h | 30
-rw-r--r-- kernel/include/linux/module.h | 174
-rw-r--r-- kernel/include/linux/moduleparam.h | 112
-rw-r--r-- kernel/include/linux/mpi.h | 25
-rw-r--r-- kernel/include/linux/mpls_iptunnel.h | 6
-rw-r--r-- kernel/include/linux/msi.h | 125
-rw-r--r-- kernel/include/linux/msm_mdp.h | 79
-rw-r--r-- kernel/include/linux/mtd/cfi.h | 188
-rw-r--r-- kernel/include/linux/mtd/map.h | 2
-rw-r--r-- kernel/include/linux/mtd/nand.h | 23
-rw-r--r-- kernel/include/linux/mtd/spi-nor.h | 88
-rw-r--r-- kernel/include/linux/n_r3964.h | 8
-rw-r--r-- kernel/include/linux/namei.h | 41
-rw-r--r-- kernel/include/linux/nd.h | 151
-rw-r--r-- kernel/include/linux/net.h | 45
-rw-r--r-- kernel/include/linux/netdev_features.h | 14
-rw-r--r-- kernel/include/linux/netdevice.h | 269
-rw-r--r-- kernel/include/linux/netfilter.h | 163
-rw-r--r-- kernel/include/linux/netfilter/ipset/ip_set.h | 63
-rw-r--r-- kernel/include/linux/netfilter/ipset/ip_set_comment.h | 38
-rw-r--r-- kernel/include/linux/netfilter/ipset/ip_set_timeout.h | 27
-rw-r--r-- kernel/include/linux/netfilter/nf_conntrack_zones_common.h | 23
-rw-r--r-- kernel/include/linux/netfilter/nfnetlink.h | 8
-rw-r--r-- kernel/include/linux/netfilter/nfnetlink_acct.h | 3
-rw-r--r-- kernel/include/linux/netfilter/x_tables.h | 71
-rw-r--r-- kernel/include/linux/netfilter_arp/arp_tables.h | 1
-rw-r--r-- kernel/include/linux/netfilter_bridge.h | 21
-rw-r--r-- kernel/include/linux/netfilter_bridge/ebtables.h | 8
-rw-r--r-- kernel/include/linux/netfilter_defs.h | 9
-rw-r--r-- kernel/include/linux/netfilter_ingress.h | 44
-rw-r--r-- kernel/include/linux/netfilter_ipv4.h | 2
-rw-r--r-- kernel/include/linux/netfilter_ipv4/ip_tables.h | 1
-rw-r--r-- kernel/include/linux/netfilter_ipv6.h | 21
-rw-r--r-- kernel/include/linux/netfilter_ipv6/ip6_tables.h | 1
-rw-r--r-- kernel/include/linux/netlink.h | 15
-rw-r--r-- kernel/include/linux/nfs4.h | 22
-rw-r--r-- kernel/include/linux/nfs_fs.h | 14
-rw-r--r-- kernel/include/linux/nfs_fs_sb.h | 10
-rw-r--r-- kernel/include/linux/nfs_page.h | 1
-rw-r--r-- kernel/include/linux/nfs_xdr.h | 88
-rw-r--r-- kernel/include/linux/nmi.h | 25
-rw-r--r-- kernel/include/linux/ntb.h | 969
-rw-r--r-- kernel/include/linux/ntb_transport.h | 86
-rw-r--r-- kernel/include/linux/nvme.h | 645
-rw-r--r-- kernel/include/linux/nvmem-consumer.h | 157
-rw-r--r-- kernel/include/linux/nvmem-provider.h | 47
-rw-r--r-- kernel/include/linux/nx842.h | 11
-rw-r--r-- kernel/include/linux/of.h | 30
-rw-r--r-- kernel/include/linux/of_address.h | 7
-rw-r--r-- kernel/include/linux/of_device.h | 9
-rw-r--r-- kernel/include/linux/of_dma.h | 25
-rw-r--r-- kernel/include/linux/of_fdt.h | 4
-rw-r--r-- kernel/include/linux/of_gpio.h | 5
-rw-r--r-- kernel/include/linux/of_graph.h | 8
-rw-r--r-- kernel/include/linux/of_irq.h | 42
-rw-r--r-- kernel/include/linux/of_pci.h | 4
-rw-r--r-- kernel/include/linux/of_platform.h | 9
-rw-r--r-- kernel/include/linux/oid_registry.h | 7
-rw-r--r-- kernel/include/linux/omap-dma.h | 2
-rw-r--r-- kernel/include/linux/once.h | 57
-rw-r--r-- kernel/include/linux/oom.h | 50
-rw-r--r-- kernel/include/linux/osq_lock.h | 5
-rw-r--r-- kernel/include/linux/page-flags.h | 103
-rw-r--r-- kernel/include/linux/page-isolation.h | 5
-rw-r--r-- kernel/include/linux/page_counter.h | 6
-rw-r--r-- kernel/include/linux/page_ext.h | 4
-rw-r--r-- kernel/include/linux/page_idle.h | 110
-rw-r--r-- kernel/include/linux/page_owner.h | 13
-rw-r--r-- kernel/include/linux/pageblock-flags.h | 2
-rw-r--r-- kernel/include/linux/pagemap.h | 16
-rw-r--r-- kernel/include/linux/parport.h | 43
-rw-r--r-- kernel/include/linux/pata_arasan_cf_data.h | 2
-rw-r--r-- kernel/include/linux/pci-acpi.h | 24
-rw-r--r-- kernel/include/linux/pci-ats.h | 49
-rw-r--r-- kernel/include/linux/pci.h | 114
-rw-r--r-- kernel/include/linux/pci_ids.h | 12
-rw-r--r-- kernel/include/linux/percpu-defs.h | 6
-rw-r--r-- kernel/include/linux/percpu-rwsem.h | 23
-rw-r--r-- kernel/include/linux/perf/arm_pmu.h | 154
-rw-r--r-- kernel/include/linux/perf_event.h | 189
-rw-r--r-- kernel/include/linux/phy.h | 37
-rw-r--r-- kernel/include/linux/phy/phy-sun4i-usb.h | 26
-rw-r--r-- kernel/include/linux/phy/phy.h | 9
-rw-r--r-- kernel/include/linux/phy_fixed.h | 8
-rw-r--r-- kernel/include/linux/pinctrl/consumer.h | 2
-rw-r--r-- kernel/include/linux/pinctrl/devinfo.h | 10
-rw-r--r-- kernel/include/linux/pinctrl/pinconf-generic.h | 64
-rw-r--r-- kernel/include/linux/pinctrl/pinctrl-state.h | 8
-rw-r--r-- kernel/include/linux/pinctrl/pinctrl.h | 2
-rw-r--r-- kernel/include/linux/pinctrl/pinmux.h | 6
-rw-r--r-- kernel/include/linux/platform_data/atmel.h | 40
-rw-r--r-- kernel/include/linux/platform_data/atmel_mxt_ts.h (renamed from kernel/include/linux/i2c/atmel_mxt_ts.h) | 12
-rw-r--r-- kernel/include/linux/platform_data/clk-ux500.h | 12
-rw-r--r-- kernel/include/linux/platform_data/dma-dw.h | 2
-rw-r--r-- kernel/include/linux/platform_data/dma-hsu.h | 4
-rw-r--r-- kernel/include/linux/platform_data/dma-rcar-audmapp.h | 34
-rw-r--r-- kernel/include/linux/platform_data/edma.h | 104
-rw-r--r-- kernel/include/linux/platform_data/gpio-ath79.h | 19
-rw-r--r-- kernel/include/linux/platform_data/gpio-em.h | 11
-rw-r--r-- kernel/include/linux/platform_data/gpio-omap.h | 12
-rw-r--r-- kernel/include/linux/platform_data/i2c-mux-reg.h | 44
-rw-r--r-- kernel/include/linux/platform_data/irq-renesas-irqc.h | 27
-rw-r--r-- kernel/include/linux/platform_data/itco_wdt.h | 19
-rw-r--r-- kernel/include/linux/platform_data/keyboard-spear.h | 2
-rw-r--r-- kernel/include/linux/platform_data/leds-kirkwood-netxbig.h | 1
-rw-r--r-- kernel/include/linux/platform_data/leds-kirkwood-ns2.h | 14
-rw-r--r-- kernel/include/linux/platform_data/lp855x.h | 2
-rw-r--r-- kernel/include/linux/platform_data/macb.h | 14
-rw-r--r-- kernel/include/linux/platform_data/mdio-gpio.h (renamed from kernel/include/linux/mdio-gpio.h) | 3
-rw-r--r-- kernel/include/linux/platform_data/mmc-esdhc-imx.h | 2
-rw-r--r-- kernel/include/linux/platform_data/mtd-nand-pxa3xx.h | 27
-rw-r--r-- kernel/include/linux/platform_data/nfcmrvl.h | 48
-rw-r--r-- kernel/include/linux/platform_data/ntc_thermistor.h | 1
-rw-r--r-- kernel/include/linux/platform_data/pixcir_i2c_ts.h (renamed from kernel/include/linux/input/pixcir_ts.h) | 1
-rw-r--r-- kernel/include/linux/platform_data/s3c-hsotg.h | 10
-rw-r--r-- kernel/include/linux/platform_data/spi-davinci.h | 1
-rw-r--r-- kernel/include/linux/platform_data/spi-mt65xx.h | 20
-rw-r--r-- kernel/include/linux/platform_data/st-nci.h (renamed from kernel/include/linux/platform_data/st21nfcb.h) | 16
-rw-r--r-- kernel/include/linux/platform_data/usb-rcar-gen2-phy.h | 22
-rw-r--r-- kernel/include/linux/platform_data/video-ep93xx.h | 8
-rw-r--r-- kernel/include/linux/platform_data/video-msm_fb.h | 146
-rw-r--r-- kernel/include/linux/platform_data/wkup_m3.h | 30
-rw-r--r-- kernel/include/linux/platform_data/zforce_ts.h | 3
-rw-r--r-- kernel/include/linux/platform_device.h | 31
-rw-r--r-- kernel/include/linux/pm.h | 15
-rw-r--r-- kernel/include/linux/pm_clock.h | 10
-rw-r--r-- kernel/include/linux/pm_domain.h | 79
-rw-r--r-- kernel/include/linux/pm_opp.h | 50
-rw-r--r-- kernel/include/linux/pm_qos.h | 5
-rw-r--r-- kernel/include/linux/pm_runtime.h | 6
-rw-r--r-- kernel/include/linux/pm_wakeirq.h | 51
-rw-r--r-- kernel/include/linux/pm_wakeup.h | 9
-rw-r--r-- kernel/include/linux/pmem.h | 181
-rw-r--r-- kernel/include/linux/poison.h | 11
-rw-r--r-- kernel/include/linux/power/bq27x00_battery.h | 19
-rw-r--r-- kernel/include/linux/power/bq27xxx_battery.h | 31
-rw-r--r-- kernel/include/linux/power/charger-manager.h | 8
-rw-r--r-- kernel/include/linux/power/max17042_battery.h | 4
-rw-r--r-- kernel/include/linux/power_supply.h | 10
-rw-r--r-- kernel/include/linux/pps_kernel.h | 16
-rw-r--r-- kernel/include/linux/pr.h | 18
-rw-r--r-- kernel/include/linux/preempt.h | 169
-rw-r--r-- kernel/include/linux/preempt_mask.h | 126
-rw-r--r-- kernel/include/linux/printk.h | 22
-rw-r--r-- kernel/include/linux/property.h | 18
-rw-r--r-- kernel/include/linux/proportions.h | 4
-rw-r--r-- kernel/include/linux/psci.h | 54
-rw-r--r-- kernel/include/linux/pstore.h | 14
-rw-r--r-- kernel/include/linux/ptp_classify.h | 7
-rw-r--r-- kernel/include/linux/ptrace.h | 25
-rw-r--r-- kernel/include/linux/pwm.h | 114
-rw-r--r-- kernel/include/linux/pxa2xx_ssp.h | 5
-rw-r--r-- kernel/include/linux/qcom_scm.h | 15
-rw-r--r-- kernel/include/linux/qed/common_hsi.h | 609
-rw-r--r-- kernel/include/linux/qed/eth_common.h | 279
-rw-r--r-- kernel/include/linux/qed/qed_chain.h | 540
-rw-r--r-- kernel/include/linux/qed/qed_eth_if.h | 165
-rw-r--r-- kernel/include/linux/qed/qed_if.h | 498
-rw-r--r-- kernel/include/linux/quotaops.h | 5
-rw-r--r-- kernel/include/linux/radix-tree.h | 22
-rw-r--r-- kernel/include/linux/random.h | 15
-rw-r--r-- kernel/include/linux/rbtree.h | 21
-rw-r--r-- kernel/include/linux/rbtree_augmented.h | 21
-rw-r--r-- kernel/include/linux/rbtree_latch.h | 212
-rw-r--r-- kernel/include/linux/rcu_sync.h | 86
-rw-r--r-- kernel/include/linux/rculist.h | 15
-rw-r--r-- kernel/include/linux/rcupdate.h | 255
-rw-r--r-- kernel/include/linux/rcutiny.h | 29
-rw-r--r-- kernel/include/linux/rcutree.h | 13
-rw-r--r-- kernel/include/linux/regmap.h | 410
-rw-r--r-- kernel/include/linux/regulator/consumer.h | 16
-rw-r--r-- kernel/include/linux/regulator/da9211.h | 19
-rw-r--r-- kernel/include/linux/regulator/driver.h | 14
-rw-r--r-- kernel/include/linux/regulator/machine.h | 10
-rw-r--r-- kernel/include/linux/regulator/max8973-regulator.h | 4
-rw-r--r-- kernel/include/linux/regulator/mt6311.h | 29
-rw-r--r-- kernel/include/linux/remoteproc.h | 9
-rw-r--r-- kernel/include/linux/reset.h | 14
-rw-r--r-- kernel/include/linux/reset/bcm63xx_pmb.h | 88
-rw-r--r-- kernel/include/linux/rhashtable.h | 18
-rw-r--r-- kernel/include/linux/ring_buffer.h | 4
-rw-r--r-- kernel/include/linux/rio.h | 2
-rw-r--r-- kernel/include/linux/rmap.h | 17
-rw-r--r-- kernel/include/linux/rotary_encoder.h | 3
-rw-r--r-- kernel/include/linux/rtc.h | 20
-rw-r--r-- kernel/include/linux/rtc/sirfsoc_rtciobrg.h | 4
-rw-r--r-- kernel/include/linux/rtmutex.h | 2
-rw-r--r-- kernel/include/linux/rtnetlink.h | 22
-rw-r--r-- kernel/include/linux/rwsem_rt.h | 26
-rw-r--r-- kernel/include/linux/scatterlist.h | 56
-rw-r--r-- kernel/include/linux/sched.h | 467
-rw-r--r-- kernel/include/linux/sched/deadline.h | 5
-rw-r--r-- kernel/include/linux/sched/sysctl.h | 12
-rw-r--r-- kernel/include/linux/scif.h | 1339
-rw-r--r-- kernel/include/linux/scpi_protocol.h | 78
-rw-r--r-- kernel/include/linux/seccomp.h | 13
-rw-r--r-- kernel/include/linux/security.h | 1630
-rw-r--r-- kernel/include/linux/seq_file.h | 24
-rw-r--r-- kernel/include/linux/seqlock.h | 128
-rw-r--r-- kernel/include/linux/serial_8250.h | 10
-rw-r--r-- kernel/include/linux/serial_core.h | 2
-rw-r--r-- kernel/include/linux/serial_sci.h | 86
-rw-r--r-- kernel/include/linux/serio.h | 2
-rw-r--r-- kernel/include/linux/shdma-base.h | 5
-rw-r--r-- kernel/include/linux/shmem_fs.h | 5
-rw-r--r-- kernel/include/linux/signal.h | 1
-rw-r--r-- kernel/include/linux/skbuff.h | 242
-rw-r--r-- kernel/include/linux/slab.h | 59
-rw-r--r-- kernel/include/linux/smpboot.h | 20
-rw-r--r-- kernel/include/linux/soc/brcmstb/brcmstb.h | 10
-rw-r--r-- kernel/include/linux/soc/dove/pmu.h | 6
-rw-r--r-- kernel/include/linux/soc/mediatek/infracfg.h | 26
-rw-r--r-- kernel/include/linux/soc/qcom/smd-rpm.h | 35
-rw-r--r-- kernel/include/linux/soc/qcom/smd.h | 57
-rw-r--r-- kernel/include/linux/soc/qcom/smem.h | 11
-rw-r--r-- kernel/include/linux/soc/sunxi/sunxi_sram.h | 19
-rw-r--r-- kernel/include/linux/sock_diag.h | 42
-rw-r--r-- kernel/include/linux/spi/cc2520.h | 1
-rw-r--r-- kernel/include/linux/spi/pxa2xx_spi.h | 1
-rw-r--r-- kernel/include/linux/spi/spi.h | 108
-rw-r--r-- kernel/include/linux/spi/spi_bitbang.h | 2
-rw-r--r-- kernel/include/linux/spinlock.h | 42
-rw-r--r-- kernel/include/linux/spinlock_rt.h | 25
-rw-r--r-- kernel/include/linux/spmi.h | 4
-rw-r--r-- kernel/include/linux/srcu.h | 5
-rw-r--r-- kernel/include/linux/ssb/ssb.h | 8
-rw-r--r-- kernel/include/linux/stddef.h | 8
-rw-r--r-- kernel/include/linux/stm.h | 126
-rw-r--r-- kernel/include/linux/stmmac.h | 23
-rw-r--r-- kernel/include/linux/stop_machine.h | 36
-rw-r--r-- kernel/include/linux/string.h | 4
-rw-r--r-- kernel/include/linux/string_helpers.h | 14
-rw-r--r-- kernel/include/linux/sunrpc/addr.h | 27
-rw-r--r-- kernel/include/linux/sunrpc/auth.h | 8
-rw-r--r-- kernel/include/linux/sunrpc/bc_xprt.h | 6
-rw-r--r-- kernel/include/linux/sunrpc/cache.h | 25
-rw-r--r-- kernel/include/linux/sunrpc/clnt.h | 1
-rw-r--r-- kernel/include/linux/sunrpc/sched.h | 19
-rw-r--r-- kernel/include/linux/sunrpc/svc.h | 68
-rw-r--r-- kernel/include/linux/sunrpc/svc_rdma.h | 101
-rw-r--r-- kernel/include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r-- kernel/include/linux/sunrpc/xprt.h | 48
-rw-r--r-- kernel/include/linux/sunrpc/xprtrdma.h | 5
-rw-r--r-- kernel/include/linux/sunrpc/xprtsock.h | 2
-rw-r--r-- kernel/include/linux/sunxi-rsb.h | 105
-rw-r--r-- kernel/include/linux/suspend.h | 45
-rw-r--r-- kernel/include/linux/sw842.h | 12
-rw-r--r-- kernel/include/linux/swait.h | 173
-rw-r--r-- kernel/include/linux/swap.h | 24
-rw-r--r-- kernel/include/linux/swapops.h | 37
-rw-r--r-- kernel/include/linux/syscalls.h | 25
-rw-r--r-- kernel/include/linux/sysfs.h | 29
-rw-r--r-- kernel/include/linux/syslog.h | 6
-rw-r--r-- kernel/include/linux/t10-pi.h | 8
-rw-r--r-- kernel/include/linux/tcp.h | 61
-rw-r--r-- kernel/include/linux/thermal.h | 147
-rw-r--r-- kernel/include/linux/ti_wilink_st.h | 2
-rw-r--r-- kernel/include/linux/tick.h | 60
-rw-r--r-- kernel/include/linux/time64.h | 37
-rw-r--r-- kernel/include/linux/timekeeper_internal.h | 19
-rw-r--r-- kernel/include/linux/timekeeping.h | 16
-rw-r--r-- kernel/include/linux/timer.h | 63
-rw-r--r-- kernel/include/linux/timerqueue.h | 8
-rw-r--r-- kernel/include/linux/timex.h | 2
-rw-r--r-- kernel/include/linux/topology.h | 6
-rw-r--r-- kernel/include/linux/tpm.h | 26
-rw-r--r-- kernel/include/linux/trace_events.h (renamed from kernel/include/linux/ftrace_event.h) | 206
-rw-r--r-- kernel/include/linux/tracehook.h | 3
-rw-r--r-- kernel/include/linux/tracepoint.h | 55
-rw-r--r-- kernel/include/linux/tty.h | 63
-rw-r--r-- kernel/include/linux/tty_driver.h | 2
-rw-r--r-- kernel/include/linux/types.h | 21
-rw-r--r-- kernel/include/linux/u64_stats_sync.h | 7
-rw-r--r-- kernel/include/linux/uaccess.h | 42
-rw-r--r-- kernel/include/linux/ucs2_string.h | 4
-rw-r--r-- kernel/include/linux/ulpi/driver.h | 60
-rw-r--r-- kernel/include/linux/ulpi/interface.h | 23
-rw-r--r-- kernel/include/linux/ulpi/regs.h | 130
-rw-r--r-- kernel/include/linux/uprobes.h | 19
-rw-r--r-- kernel/include/linux/usb.h | 8
-rw-r--r-- kernel/include/linux/usb/cdc.h | 51
-rw-r--r-- kernel/include/linux/usb/cdc_ncm.h | 8
-rw-r--r-- kernel/include/linux/usb/ch9.h | 11
-rw-r--r-- kernel/include/linux/usb/chipidea.h | 39
-rw-r--r-- kernel/include/linux/usb/composite.h | 2
-rw-r--r-- kernel/include/linux/usb/gadget.h | 227
-rw-r--r-- kernel/include/linux/usb/gadget_configfs.h | 19
-rw-r--r-- kernel/include/linux/usb/hcd.h | 34
-rw-r--r-- kernel/include/linux/usb/msm_hsusb.h | 29
-rw-r--r-- kernel/include/linux/usb/msm_hsusb_hw.h | 9
-rw-r--r-- kernel/include/linux/usb/musb.h | 2
-rw-r--r-- kernel/include/linux/usb/net2280.h | 3
-rw-r--r-- kernel/include/linux/usb/of.h | 19
-rw-r--r-- kernel/include/linux/usb/otg.h | 24
-rw-r--r-- kernel/include/linux/usb/phy.h | 16
-rw-r--r-- kernel/include/linux/usb/quirks.h | 3
-rw-r--r-- kernel/include/linux/usb/renesas_usbhs.h | 5
-rw-r--r-- kernel/include/linux/usb/ulpi.h | 134
-rw-r--r-- kernel/include/linux/usb/usb338x.h | 4
-rw-r--r-- kernel/include/linux/userfaultfd_k.h | 85
-rw-r--r-- kernel/include/linux/verify_pefile.h | 6
-rw-r--r-- kernel/include/linux/vga_switcheroo.h | 100
-rw-r--r-- kernel/include/linux/virtio_byteorder.h | 24
-rw-r--r-- kernel/include/linux/virtio_config.h | 18
-rw-r--r-- kernel/include/linux/vm_event_item.h | 4
-rw-r--r-- kernel/include/linux/vmalloc.h | 12
-rw-r--r-- kernel/include/linux/vme.h | 5
-rw-r--r-- kernel/include/linux/vmstat.h | 31
-rw-r--r-- kernel/include/linux/vringh.h | 18
-rw-r--r-- kernel/include/linux/wait-simple.h | 207
-rw-r--r-- kernel/include/linux/wait.h | 40
-rw-r--r-- kernel/include/linux/watchdog.h | 34
-rw-r--r-- kernel/include/linux/workqueue.h | 37
-rw-r--r-- kernel/include/linux/writeback.h | 226
-rw-r--r-- kernel/include/linux/xattr.h | 18
-rw-r--r-- kernel/include/linux/zbud.h | 2
-rw-r--r-- kernel/include/linux/zpool.h | 15
-rw-r--r-- kernel/include/linux/zsmalloc.h | 8
-rw-r--r-- kernel/include/linux/zutil.h | 4
636 files changed, 35664 insertions, 12128 deletions
diff --git a/kernel/include/linux/acpi.h b/kernel/include/linux/acpi.h
index 808c43afa..1991aea2e 100644
--- a/kernel/include/linux/acpi.h
+++ b/kernel/include/linux/acpi.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -53,14 +49,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
return adev ? adev->handle : NULL;
}
-#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode)
+#define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode)
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields will be left
+ * initialized with the default value.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
static inline bool has_acpi_companion(struct device *dev)
{
- return is_acpi_node(dev->fwnode);
+ return is_acpi_device_node(dev->fwnode);
}
static inline void acpi_preset_companion(struct device *dev,
@@ -122,6 +131,12 @@ static inline void acpi_initrd_override(void *data, size_t size)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ int count;
+};
+
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -137,9 +152,16 @@ int __init acpi_parse_entries(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
int entry_id, unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
+ int entry_id,
+ acpi_tbl_entry_handler handler,
+ unsigned int max_entries);
+int __init acpi_table_parse_entries(char *id, unsigned long table_size,
+ int entry_id,
+ acpi_tbl_entry_handler handler,
+ unsigned int max_entries);
+int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
+ struct acpi_subtable_proc *proc, int proc_num,
+ unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
@@ -158,6 +180,16 @@ typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
#endif
+static inline bool invalid_logical_cpuid(u32 cpuid)
+{
+ return (int)cpuid < 0;
+}
+
+static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
+{
+ return phys_id == PHYS_CPUID_INVALID;
+}
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
@@ -174,6 +206,12 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;
+extern unsigned int acpi_sci_irq;
+#define INVALID_ACPI_IRQ ((unsigned)-1)
+static inline bool acpi_sci_irq_valid(void)
+{
+ return acpi_sci_irq != INVALID_ACPI_IRQ;
+}
extern int sbf_port;
extern unsigned long acpi_realmode_flags;
@@ -182,6 +220,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
+void acpi_set_irq_model(enum acpi_irq_model_id model,
+ struct fwnode_handle *fwnode);
+
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
@@ -198,6 +239,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
@@ -243,54 +285,21 @@ extern bool wmi_has_guid(const char *guid);
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
-#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-
-extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern void acpi_video_dmi_promote_vendor(void);
-extern void acpi_video_dmi_demote_vendor(void);
-extern int acpi_video_backlight_support(void);
-extern int acpi_video_display_switch_support(void);
-
-#else
-
-static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
-{
- return 0;
-}
-
-static inline long acpi_is_video_device(acpi_handle handle)
-{
- return 0;
-}
-
-static inline void acpi_video_dmi_promote_vendor(void)
-{
-}
-
-static inline void acpi_video_dmi_demote_vendor(void)
-{
-}
-
-static inline int acpi_video_backlight_support(void)
-{
- return 0;
-}
-
-static inline int acpi_video_display_switch_support(void)
-{
- return 0;
-}
-
-#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
-
extern int acpi_blacklisted(void);
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern void acpi_osi_setup(char *str);
+extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
+int acpi_map_pxm_to_online_node(int pxm);
int acpi_get_node(acpi_handle handle);
#else
+static inline int acpi_map_pxm_to_online_node(int pxm)
+{
+ return 0;
+}
static inline int acpi_get_node(acpi_handle handle)
{
return 0;
@@ -466,6 +475,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
struct fwnode_handle;
@@ -474,7 +484,22 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
return false;
}
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
+{
+ return false;
+}
+
+static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
+{
+ return false;
+}
+
+static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -489,6 +514,11 @@ static inline bool has_acpi_companion(struct device *dev)
return false;
}
+static inline void acpi_preset_companion(struct device *dev,
+ struct acpi_device *parent, u64 addr)
+{
+}
+
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
@@ -571,6 +601,16 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
+static inline bool acpi_dma_supported(struct acpi_device *adev)
+{
+ return false;
+}
+
+static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
+{
+ return DEV_DMA_NOT_SUPPORTED;
+}
+
#define ACPI_PTR(_ptr) (NULL)
#endif /* !CONFIG_ACPI */
@@ -723,6 +763,8 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
if (adev)
adev->driver_gpios = NULL;
}
+
+int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
@@ -730,6 +772,11 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
return -ENXIO;
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+ return -ENXIO;
+}
#endif
/* Device properties */
@@ -744,22 +791,76 @@ struct acpi_reference_args {
#ifdef CONFIG_ACPI
int acpi_dev_get_property(struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
-int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
- acpi_object_type type,
- const union acpi_object **obj);
-int acpi_dev_get_property_reference(struct acpi_device *adev,
- const char *name, size_t index,
- struct acpi_reference_args *args);
-
-int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
- void **valptr);
+int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct acpi_reference_args *args);
+
+int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
+ void **valptr);
int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val);
+int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
+ enum dev_prop_type proptype, void *val, size_t nval);
int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval);
-struct acpi_device *acpi_get_next_child(struct device *dev,
- struct acpi_device *child);
+struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
+ struct fwnode_handle *subnode);
+
+struct acpi_probe_entry;
+typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
+ struct acpi_probe_entry *);
+
+#define ACPI_TABLE_ID_LEN 5
+
+/**
+ * struct acpi_probe_entry - boot-time probing entry
+ * @id: ACPI table name
+ * @type: Optional subtable type to match
+ * (if @id contains subtables)
+ * @subtable_valid: Optional callback to check the validity of
+ * the subtable
+ * @probe_table: Callback to the driver being probed when table
+ * match is successful
+ * @probe_subtbl: Callback to the driver being probed when table and
+ * subtable match (and optional callback is successful)
+ * @driver_data: Sideband data provided back to the driver
+ */
+struct acpi_probe_entry {
+ __u8 id[ACPI_TABLE_ID_LEN];
+ __u8 type;
+ acpi_probe_entry_validate_subtbl subtable_valid;
+ union {
+ acpi_tbl_table_handler probe_table;
+ acpi_tbl_entry_handler probe_subtbl;
+ };
+ kernel_ulong_t driver_data;
+};
+
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+ static const struct acpi_probe_entry __acpi_probe_##name \
+ __used __section(__##table##_acpi_probe_table) \
+ = { \
+ .id = table_id, \
+ .type = subtable, \
+ .subtable_valid = valid, \
+ .probe_table = (acpi_tbl_table_handler)fn, \
+ .driver_data = data, \
+ }
+
+#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table
+#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end
+
+int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr);
+
+#define acpi_probe_device_table(t) \
+ ({ \
+ extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \
+ ACPI_PROBE_TABLE_END(t); \
+ __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \
+ (&ACPI_PROBE_TABLE_END(t) - \
+ &ACPI_PROBE_TABLE(t))); \
+ })
#else
static inline int acpi_dev_get_property(struct acpi_device *adev,
const char *name, acpi_object_type type,
@@ -767,16 +868,17 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
{
return -ENXIO;
}
-static inline int acpi_dev_get_property_array(struct acpi_device *adev,
- const char *name,
- acpi_object_type type,
- const union acpi_object **obj)
+
+static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
- const char *name, const char *cells_name,
- size_t index, struct acpi_reference_args *args)
+
+static inline int acpi_node_prop_get(struct fwnode_handle *fwnode,
+ const char *propname,
+ void **valptr)
{
return -ENXIO;
}
@@ -796,6 +898,14 @@ static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
return -ENXIO;
}
+static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val, size_t nval)
+{
+ return -ENXIO;
+}
+
static inline int acpi_dev_prop_read(struct acpi_device *adev,
const char *propname,
enum dev_prop_type proptype,
@@ -804,12 +914,22 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
return -ENXIO;
}
-static inline struct acpi_device *acpi_get_next_child(struct device *dev,
- struct acpi_device *child)
+static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
+ struct fwnode_handle *subnode)
{
return NULL;
}
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+ static const void * __acpi_table_##name[] \
+ __attribute__((unused)) \
+ = { (void *) table_id, \
+ (void *) subtable, \
+ (void *) valid, \
+ (void *) fn, \
+ (void *) data }
+
+#define acpi_probe_device_table(t) ({ int __r = 0; __r;})
#endif
#endif /*_LINUX_ACPI_H*/
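
Usage note on the ACPI_DEVICE_CLASS macro added above: it lets a driver match on the PCI-defined class code reported by _CLS instead of a _HID. The sketch below is illustrative rather than taken from this tree; the table name is hypothetical, and PCI_CLASS_STORAGE_SATA_AHCI (from <linux/pci_ids.h>) is just one plausible class triple.

/* Hypothetical match table: matches any ACPI device whose _CLS
 * reports the AHCI class/subclass/prog-if triple, ignoring _HID. */
static const struct acpi_device_id ahci_acpi_match[] = {
	{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);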
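
The struct acpi_subtable_proc and acpi_table_parse_entries_array() additions above let a caller match several subtable types in a single pass over a table. A minimal hedged sketch, modeled on MADT parsing; the handler and function names are made up, and after the call each entry's .count field holds the number of matching subtables seen.

static int __init parse_lapic(struct acpi_subtable_header *header,
			      const unsigned long end)
{
	/* record the local APIC entry described by @header */
	return 0;
}

static int __init parse_x2apic(struct acpi_subtable_header *header,
			       const unsigned long end)
{
	/* record the x2APIC entry described by @header */
	return 0;
}

static int __init scan_madt(void)
{
	struct acpi_subtable_proc madt_proc[2] = {
		{ .id = ACPI_MADT_TYPE_LOCAL_APIC,   .handler = parse_lapic },
		{ .id = ACPI_MADT_TYPE_LOCAL_X2APIC, .handler = parse_x2apic },
	};

	/* table_size is the fixed header size; max_entries 0 = no cap */
	return acpi_table_parse_entries_array(ACPI_SIG_MADT,
					      sizeof(struct acpi_table_madt),
					      madt_proc,
					      ARRAY_SIZE(madt_proc), 0);
}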
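
The acpi_probe_entry machinery above mirrors OF_DECLARE-style boot probing: each ACPI_DECLARE_PROBE_ENTRY() drops an entry into a per-subsystem linker section, and acpi_probe_device_table() later fires every entry whose table is present. A hedged sketch; the "timer" table name, probe function, and init hook are hypothetical and assume a matching __timer_acpi_probe_table section in the linker script.

static int __init my_timer_probe(struct acpi_table_header *table)
{
	/* initialize the device described by the GTDT */
	return 0;
}

/* Whole-table match: no subtable type (.type = 0), no validate callback */
ACPI_DECLARE_PROBE_ENTRY(timer, my_timer, ACPI_SIG_GTDT, 0, NULL, 0,
			 my_timer_probe);

static void __init my_timer_acpi_init(void)
{
	/* walks every entry registered against the "timer" table */
	acpi_probe_device_table(timer);
}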
diff --git a/kernel/include/linux/acpi_irq.h b/kernel/include/linux/acpi_irq.h
deleted file mode 100644
index f10c87265..000000000
--- a/kernel/include/linux/acpi_irq.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_ACPI_IRQ_H
-#define _LINUX_ACPI_IRQ_H
-
-#include <linux/irq.h>
-
-#ifndef acpi_irq_init
-static inline void acpi_irq_init(void) { }
-#endif
-
-#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/kernel/include/linux/aer.h b/kernel/include/linux/aer.h
index 4fef65e57..744b997d6 100644
--- a/kernel/include/linux/aer.h
+++ b/kernel/include/linux/aer.h
@@ -42,6 +42,7 @@ struct aer_capability_regs {
int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
+int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
#else
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -55,6 +56,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
#endif
void cper_print_aer(struct pci_dev *dev, int cper_severity,
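
For orientation, a hedged sketch of a PCI driver opting into AER reporting and clearing stale status with the helper declared above; the probe function is made up and is shown only to illustrate the call signatures (both helpers fall back to -EINVAL stubs when CONFIG_PCIEAER is not set).

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;

	pci_enable_pcie_error_reporting(pdev);
	/* clear correctable and uncorrectable status left over
	 * from before this driver bound to the device */
	pci_cleanup_aer_error_status_regs(pdev);
	return 0;
}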
diff --git a/kernel/include/linux/alarmtimer.h b/kernel/include/linux/alarmtimer.h
index a899402a5..52f3b7da4 100644
--- a/kernel/include/linux/alarmtimer.h
+++ b/kernel/include/linux/alarmtimer.h
@@ -43,8 +43,8 @@ struct alarm {
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);
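
With alarm_start() and alarm_start_relative() now returning void, callers no longer have a result to check and simply arm the alarm. A minimal hedged sketch with a made-up callback:

static enum alarmtimer_restart my_alarm_cb(struct alarm *alarm, ktime_t now)
{
	/* runs once when the alarm fires */
	return ALARMTIMER_NORESTART;
}

static void arm_my_alarm(struct alarm *alarm)
{
	alarm_init(alarm, ALARM_REALTIME, my_alarm_cb);
	alarm_start_relative(alarm, ktime_set(5, 0));	/* ~5 s from now */
}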
diff --git a/kernel/include/linux/amba/bus.h b/kernel/include/linux/amba/bus.h
index 50fc66868..9006c4e75 100644
--- a/kernel/include/linux/amba/bus.h
+++ b/kernel/include/linux/amba/bus.h
@@ -41,8 +41,6 @@ struct amba_driver {
int (*probe)(struct amba_device *, const struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
- int (*suspend)(struct amba_device *, pm_message_t);
- int (*resume)(struct amba_device *);
const struct amba_id *id_table;
};
diff --git a/kernel/include/linux/amba/sp810.h b/kernel/include/linux/amba/sp810.h
index c7df89f99..58fe9e8b6 100644
--- a/kernel/include/linux/amba/sp810.h
+++ b/kernel/include/linux/amba/sp810.h
@@ -2,7 +2,7 @@
* ARM PrimeXsys System Controller SP810 header file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/kernel/include/linux/arcdevice.h b/kernel/include/linux/arcdevice.h
deleted file mode 100644
index df0356220..000000000
--- a/kernel/include/linux/arcdevice.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. NET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Definitions used by the ARCnet driver.
- *
- * Authors: Avery Pennarun and David Woodhouse
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-#ifndef _LINUX_ARCDEVICE_H
-#define _LINUX_ARCDEVICE_H
-
-#include <asm/timex.h>
-#include <linux/if_arcnet.h>
-
-#ifdef __KERNEL__
-#include <linux/irqreturn.h>
-
-/*
- * RECON_THRESHOLD is the maximum number of RECON messages to receive
- * within one minute before printing a "cabling problem" warning. The
- * default value should be fine.
- *
- * After that, a "cabling restored" message will be printed on the next IRQ
- * if no RECON messages have been received for 10 seconds.
- *
- * Do not define RECON_THRESHOLD at all if you want to disable this feature.
- */
-#define RECON_THRESHOLD 30
-
-
-/*
- * Define this to the minimum "timeout" value. If a transmit takes longer
- * than TX_TIMEOUT jiffies, Linux will abort the TX and retry. On a large
- * network, or one with heavy network traffic, this timeout may need to be
- * increased. The larger it is, though, the longer it will be between
- * necessary transmits - don't set this too high.
- */
-#define TX_TIMEOUT (HZ * 200 / 1000)
-
-
-/* Display warnings about the driver being an ALPHA version. */
-#undef ALPHA_WARNING
-
-
-/*
- * Debugging bitflags: each option can be enabled individually.
- *
- * Note: only debug flags included in the ARCNET_DEBUG_MAX define will
- * actually be available. GCC will (at least, GCC 2.7.0 will) notice
- * lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize
- * them out.
- */
-#define D_NORMAL 1 /* important operational info */
-#define D_EXTRA 2 /* useful, but non-vital information */
-#define D_INIT 4 /* show init/probe messages */
-#define D_INIT_REASONS 8 /* show reasons for discarding probes */
-#define D_RECON 32 /* print a message whenever token is lost */
-#define D_PROTO 64 /* debug auto-protocol support */
-/* debug levels below give LOTS of output during normal operation! */
-#define D_DURING 128 /* trace operations (including irq's) */
-#define D_TX 256 /* show tx packets */
-#define D_RX 512 /* show rx packets */
-#define D_SKB 1024 /* show skb's */
-#define D_SKB_SIZE 2048 /* show skb sizes */
-#define D_TIMING 4096 /* show time needed to copy buffers to card */
-#define D_DEBUG 8192 /* Very detailed debug line for line */
-
-#ifndef ARCNET_DEBUG_MAX
-#define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */
-#endif
-
-#ifndef ARCNET_DEBUG
-#define ARCNET_DEBUG (D_NORMAL|D_EXTRA)
-#endif
-extern int arcnet_debug;
-
-/* macros to simplify debug checking */
-#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x))
-#define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0)
-#define BUGMSG(x,msg,args...) \
- BUGMSG2(x, "%s%6s: " msg, \
- x==D_NORMAL ? KERN_WARNING \
- : x < D_DURING ? KERN_INFO : KERN_DEBUG, \
- dev->name , ## args)
-
-/* see how long a function call takes to run, expressed in CPU cycles */
-#define TIME(name, bytes, call) BUGLVL(D_TIMING) { \
- unsigned long _x, _y; \
- _x = get_cycles(); \
- call; \
- _y = get_cycles(); \
- BUGMSG(D_TIMING, \
- "%s: %d bytes in %lu cycles == " \
- "%lu Kbytes/100Mcycle\n",\
- name, bytes, _y - _x, \
- 100000000 / 1024 * bytes / (_y - _x + 1));\
- } \
- else { \
- call;\
- }
-
-
-/*
- * Time needed to reset the card - in ms (milliseconds). This works on my
- * SMC PC100. I can't find a reference that tells me just how long I
- * should wait.
- */
-#define RESETtime (300)
-
-/*
- * These are the max/min lengths of packet payload, not including the
- * arc_hardware header, but definitely including the soft header.
- *
- * Note: packet sizes 254, 255, 256 are impossible because of the way
- * ARCnet registers work That's why RFC1201 defines "exception" packets.
- * In non-RFC1201 protocols, we have to just tack some extra bytes on the
- * end.
- */
-#define MTU 253 /* normal packet max size */
-#define MinTU 257 /* extended packet min size */
-#define XMTU 508 /* extended packet max size */
-
-/* status/interrupt mask bit fields */
-#define TXFREEflag 0x01 /* transmitter available */
-#define TXACKflag 0x02 /* transmitted msg. ackd */
-#define RECONflag 0x04 /* network reconfigured */
-#define TESTflag 0x08 /* test flag */
-#define EXCNAKflag 0x08 /* excesive nak flag */
-#define RESETflag 0x10 /* power-on-reset */
-#define RES1flag 0x20 /* reserved - usually set by jumper */
-#define RES2flag 0x40 /* reserved - usually set by jumper */
-#define NORXflag 0x80 /* receiver inhibited */
-
-/* Flags used for IO-mapped memory operations */
-#define AUTOINCflag 0x40 /* Increase location with each access */
-#define IOMAPflag 0x02 /* (for 90xx) Use IO mapped memory, not mmap */
-#define ENABLE16flag 0x80 /* (for 90xx) Enable 16-bit mode */
-
-/* in the command register, the following bits have these meanings:
- * 0-2 command
- * 3-4 page number (for enable rcv/xmt command)
- * 7 receive broadcasts
- */
-#define NOTXcmd 0x01 /* disable transmitter */
-#define NORXcmd 0x02 /* disable receiver */
-#define TXcmd 0x03 /* enable transmitter */
-#define RXcmd 0x04 /* enable receiver */
-#define CONFIGcmd 0x05 /* define configuration */
-#define CFLAGScmd 0x06 /* clear flags */
-#define TESTcmd 0x07 /* load test flags */
-
-/* flags for "clear flags" command */
-#define RESETclear 0x08 /* power-on-reset */
-#define CONFIGclear 0x10 /* system reconfigured */
-
-#define EXCNAKclear 0x0E /* Clear and acknowledge the excive nak bit */
-
-/* flags for "load test flags" command */
-#define TESTload 0x08 /* test flag (diagnostic) */
-
-/* byte deposited into first address of buffers on reset */
-#define TESTvalue 0321 /* that's octal for 0xD1 :) */
-
-/* for "enable receiver" command */
-#define RXbcasts 0x80 /* receive broadcasts */
-
-/* flags for "define configuration" command */
-#define NORMALconf 0x00 /* 1-249 byte packets */
-#define EXTconf 0x08 /* 250-504 byte packets */
-
-/* card feature flags, set during auto-detection.
- * (currently only used by com20020pci)
- */
-#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
-#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
- but default is 2.5MBit. */
-
-
-/* information needed to define an encapsulation driver */
-struct ArcProto {
- char suffix; /* a for RFC1201, e for ether-encap, etc. */
- int mtu; /* largest possible packet */
- int is_ip; /* This is a ip plugin - not a raw thing */
-
- void (*rx) (struct net_device * dev, int bufnum,
- struct archdr * pkthdr, int length);
- int (*build_header) (struct sk_buff * skb, struct net_device *dev,
- unsigned short ethproto, uint8_t daddr);
-
- /* these functions return '1' if the skb can now be freed */
- int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length,
- int bufnum);
- int (*continue_tx) (struct net_device * dev, int bufnum);
- int (*ack_tx) (struct net_device * dev, int acked);
-};
-
-extern struct ArcProto *arc_proto_map[256], *arc_proto_default,
- *arc_bcast_proto, *arc_raw_proto;
-
-
-/*
- * "Incoming" is information needed for each address that could be sending
- * to us. Mostly for partially-received split packets.
- */
-struct Incoming {
- struct sk_buff *skb; /* packet data buffer */
- __be16 sequence; /* sequence number of assembly */
- uint8_t lastpacket, /* number of last packet (from 1) */
- numpackets; /* number of packets in split */
-};
-
-
-/* only needed for RFC1201 */
-struct Outgoing {
- struct ArcProto *proto; /* protocol driver that owns this:
- * if NULL, no packet is pending.
- */
- struct sk_buff *skb; /* buffer from upper levels */
- struct archdr *pkt; /* a pointer into the skb */
- uint16_t length, /* bytes total */
- dataleft, /* bytes left */
- segnum, /* segment being sent */
- numsegs; /* number of segments */
-};
-
-
-struct arcnet_local {
- uint8_t config, /* current value of CONFIG register */
- timeout, /* Extended timeout for COM20020 */
- backplane, /* Backplane flag for COM20020 */
- clockp, /* COM20020 clock divider */
- clockm, /* COM20020 clock multiplier flag */
- setup, /* Contents of setup1 register */
- setup2, /* Contents of setup2 register */
- intmask; /* current value of INTMASK register */
- uint8_t default_proto[256]; /* default encap to use for each host */
- int cur_tx, /* buffer used by current transmit, or -1 */
- next_tx, /* buffer where a packet is ready to send */
- cur_rx; /* current receive buffer */
- int lastload_dest, /* can last loaded packet be acked? */
- lasttrans_dest; /* can last TX'd packet be acked? */
- int timed_out; /* need to process TX timeout and drop packet */
- unsigned long last_timeout; /* time of last reported timeout */
- char *card_name; /* card ident string */
- int card_flags; /* special card features */
-
-
- /* On preemtive and SMB a lock is needed */
- spinlock_t lock;
-
- /*
- * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
- * which can be used for either sending or receiving. The new dynamic
- * buffer management routines use a simple circular queue of available
- * buffers, and take them as they're needed. This way, we simplify
- * situations in which we (for example) want to pre-load a transmit
- * buffer, or start receiving while we copy a received packet to
- * memory.
- *
- * The rules: only the interrupt handler is allowed to _add_ buffers to
- * the queue; thus, this doesn't require a lock. Both the interrupt
- * handler and the transmit function will want to _remove_ buffers, so
- * we need to handle the situation where they try to do it at the same
- * time.
- *
- * If next_buf == first_free_buf, the queue is empty. Since there are
- * only four possible buffers, the queue should never be full.
- */
- atomic_t buf_lock;
- int buf_queue[5];
- int next_buf, first_free_buf;
-
- /* network "reconfiguration" handling */
- unsigned long first_recon; /* time of "first" RECON message to count */
- unsigned long last_recon; /* time of most recent RECON */
- int num_recons; /* number of RECONs between first and last. */
- int network_down; /* do we think the network is down? */
-
- int excnak_pending; /* We just got an excessive NAK interrupt */
-
- struct {
- uint16_t sequence; /* sequence number (incs with each packet) */
- __be16 aborted_seq;
-
- struct Incoming incoming[256]; /* one from each address */
- } rfc1201;
-
- /* really only used by rfc1201, but we'll pretend it's not */
- struct Outgoing outgoing; /* packet currently being sent */
-
- /* hardware-specific functions */
- struct {
- struct module *owner;
- void (*command) (struct net_device * dev, int cmd);
- int (*status) (struct net_device * dev);
- void (*intmask) (struct net_device * dev, int mask);
- int (*reset) (struct net_device * dev, int really_reset);
- void (*open) (struct net_device * dev);
- void (*close) (struct net_device * dev);
-
- void (*copy_to_card) (struct net_device * dev, int bufnum, int offset,
- void *buf, int count);
- void (*copy_from_card) (struct net_device * dev, int bufnum, int offset,
- void *buf, int count);
- } hw;
-
- void __iomem *mem_start; /* pointer to ioremap'ed MMIO */
-};
-
-
-#define ARCRESET(x) (lp->hw.reset(dev, (x)))
-#define ACOMMAND(x) (lp->hw.command(dev, (x)))
-#define ASTATUS() (lp->hw.status(dev))
-#define AINTMASK(x) (lp->hw.intmask(dev, (x)))
-
-
-
-#if ARCNET_DEBUG_MAX & D_SKB
-void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
-#else
-#define arcnet_dump_skb(dev,skb,desc) ;
-#endif
-
-void arcnet_unregister_proto(struct ArcProto *proto);
-irqreturn_t arcnet_interrupt(int irq, void *dev_id);
-struct net_device *alloc_arcdev(const char *name);
-
-int arcnet_open(struct net_device *dev);
-int arcnet_close(struct net_device *dev);
-netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-void arcnet_timeout(struct net_device *dev);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_ARCDEVICE_H */
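The buffer-management comment in struct arcnet_local above pins down a small ring: four card buffers tracked in a five-slot array, where next_buf == first_free_buf means empty and the queue can never fill. A minimal sketch of what that invariant implies (hypothetical helpers, not functions from the driver):

/* Hypothetical sketch of the circular buffer queue described above. */
static int queue_pop_buf(struct arcnet_local *lp)
{
	int buf;

	if (lp->next_buf == lp->first_free_buf)
		return -1;			/* queue empty */
	buf = lp->buf_queue[lp->next_buf];
	lp->next_buf = (lp->next_buf + 1) % 5;
	return buf;
}

static void queue_push_buf(struct arcnet_local *lp, int bufnum)
{
	/* only the interrupt handler adds buffers, so no lock is needed */
	lp->buf_queue[lp->first_free_buf] = bufnum;
	lp->first_free_buf = (lp->first_free_buf + 1) % 5;
}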
diff --git a/kernel/include/linux/asn1_ber_bytecode.h b/kernel/include/linux/asn1_ber_bytecode.h
index 945d44ae5..ab3a6c002 100644
--- a/kernel/include/linux/asn1_ber_bytecode.h
+++ b/kernel/include/linux/asn1_ber_bytecode.h
@@ -45,23 +45,27 @@ enum asn1_opcode {
ASN1_OP_MATCH_JUMP = 0x04,
ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
ASN1_OP_MATCH_ANY = 0x08,
+ ASN1_OP_MATCH_ANY_OR_SKIP = 0x09,
ASN1_OP_MATCH_ANY_ACT = 0x0a,
+ ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b,
/* Everything before here matches unconditionally */
ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
ASN1_OP_COND_MATCH_ANY = 0x18,
+ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19,
ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
+ ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
/* Everything before here will want a tag from the data */
-#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT
+#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
/* These are here to help fill up space */
- ASN1_OP_COND_FAIL = 0x1b,
- ASN1_OP_COMPLETE = 0x1c,
- ASN1_OP_ACT = 0x1d,
- ASN1_OP_RETURN = 0x1e,
+ ASN1_OP_COND_FAIL = 0x1c,
+ ASN1_OP_COMPLETE = 0x1d,
+ ASN1_OP_ACT = 0x1e,
+ ASN1_OP_MAYBE_ACT = 0x1f,
/* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
ASN1_OP_END_SEQ = 0x20,
@@ -76,6 +80,8 @@ enum asn1_opcode {
#define ASN1_OP_END__OF 0x02
#define ASN1_OP_END__ACT 0x04
+ ASN1_OP_RETURN = 0x28,
+
ASN1_OP__NR
};
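For the END opcodes above, the flag bits compose directly onto the 0x20 base: bit 0 selects SET over SEQ, bit 1 adds OF, bit 2 adds ACT. A quick illustration (the helper name is hypothetical, not part of the header):

/* Sketch: test whether an END opcode carries an action. */
static inline int asn1_end_has_act(unsigned char op)
{
	return (op & ~0x07) == ASN1_OP_END_SEQ &&	/* in the END range */
	       (op & ASN1_OP_END__ACT);			/* bit 2 -> ACT */
}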
diff --git a/kernel/include/linux/ata.h b/kernel/include/linux/ata.h
index 5dfbcd888..c1a2f345c 100644
--- a/kernel/include/linux/ata.h
+++ b/kernel/include/linux/ata.h
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
};
enum ata_ioctls {
- ATA_IOC_GET_IO32 = 0x309,
- ATA_IOC_SET_IO32 = 0x324,
+ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
+ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
};
/* core structures */
@@ -701,9 +701,19 @@ static inline bool ata_id_wcache_enabled(const u16 *id)
static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
{
+ /* Word 86 must have bit 15 set */
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
+
+ /* READ LOG DMA EXT support can be signaled either from word 119
+ * or from word 120. The format is the same for both words: Bit
+ * 15 must be cleared, bit 14 set and bit 3 set.
+ */
+ if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 ||
+ (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008)
+ return true;
+
+ return false;
}
/**
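The word-119/120 check above can be exercised against synthetic identify data; a sketch (the buffer contents are made up, not read from a real device):

static bool example_supports_read_log_dma_ext(void)
{
	u16 id[ATA_ID_WORDS] = { 0 };		/* synthetic identify data */

	id[ATA_ID_CFS_ENABLE_2]  = 1 << 15;	/* words 119-120 are valid */
	id[ATA_ID_COMMAND_SET_3] = 0x4008;	/* bits 14 and 3 set, bit 15 clear */

	return ata_id_has_read_log_dma_ext(id);	/* true for this data */
}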
diff --git a/kernel/include/linux/atmel_serial.h b/kernel/include/linux/atmel_serial.h
index 00beddf6b..ee696d7e8 100644
--- a/kernel/include/linux/atmel_serial.h
+++ b/kernel/include/linux/atmel_serial.h
@@ -16,115 +16,151 @@
#ifndef ATMEL_SERIAL_H
#define ATMEL_SERIAL_H
-#define ATMEL_US_CR 0x00 /* Control Register */
-#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */
-#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */
-#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */
-#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */
-#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */
-#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */
-#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */
-#define ATMEL_US_STTBRK (1 << 9) /* Start Break */
-#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */
-#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */
-#define ATMEL_US_SENDA (1 << 12) /* Send Address */
-#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */
-#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
-#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */
-#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */
-#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */
-#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */
-#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */
+#define ATMEL_US_CR 0x00 /* Control Register */
+#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
+#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
+#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
+#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
+#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
+#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
+#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
+#define ATMEL_US_STTBRK BIT(9) /* Start Break */
+#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
+#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
+#define ATMEL_US_SENDA BIT(12) /* Send Address */
+#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
+#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
+#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
+#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
+#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
+#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
+#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
+#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
+#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
+#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
+#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
+#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
-#define ATMEL_US_MR 0x04 /* Mode Register */
-#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
-#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
-#define ATMEL_US_CHRL (3 << 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
-#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */
-#define ATMEL_US_PAR (7 << 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
-#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
-#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
-#define ATMEL_US_MSBF (1 << 16) /* Bit Order */
-#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */
-#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */
-#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */
-#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
-#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */
-#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */
-#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
+#define ATMEL_US_MR 0x04 /* Mode Register */
+#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
+#define ATMEL_US_USMODE_NORMAL 0
+#define ATMEL_US_USMODE_RS485 1
+#define ATMEL_US_USMODE_HWHS 2
+#define ATMEL_US_USMODE_MODEM 3
+#define ATMEL_US_USMODE_ISO7816_T0 4
+#define ATMEL_US_USMODE_ISO7816_T1 6
+#define ATMEL_US_USMODE_IRDA 8
+#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
+#define ATMEL_US_USCLKS_MCK (0 << 4)
+#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
+#define ATMEL_US_USCLKS_SCK (3 << 4)
+#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
+#define ATMEL_US_CHRL_5 (0 << 6)
+#define ATMEL_US_CHRL_6 (1 << 6)
+#define ATMEL_US_CHRL_7 (2 << 6)
+#define ATMEL_US_CHRL_8 (3 << 6)
+#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
+#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
+#define ATMEL_US_PAR_EVEN (0 << 9)
+#define ATMEL_US_PAR_ODD (1 << 9)
+#define ATMEL_US_PAR_SPACE (2 << 9)
+#define ATMEL_US_PAR_MARK (3 << 9)
+#define ATMEL_US_PAR_NONE (4 << 9)
+#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
+#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
+#define ATMEL_US_NBSTOP_1 (0 << 12)
+#define ATMEL_US_NBSTOP_1_5 (1 << 12)
+#define ATMEL_US_NBSTOP_2 (2 << 12)
+#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
+#define ATMEL_US_CHMODE_NORMAL (0 << 14)
+#define ATMEL_US_CHMODE_ECHO (1 << 14)
+#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
+#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
+#define ATMEL_US_MSBF BIT(16) /* Bit Order */
+#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
+#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
+#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
+#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
+#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
+#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
+#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
-#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
-#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */
-#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */
-#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */
-#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */
-#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
-#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */
-#define ATMEL_US_FRAME (1 << 6) /* Framing Error */
-#define ATMEL_US_PARE (1 << 7) /* Parity Error */
-#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */
-#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */
-#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
-#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
-#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */
-#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */
-#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */
-#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */
-#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */
-#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */
-#define ATMEL_US_RI (1 << 20) /* RI */
-#define ATMEL_US_DSR (1 << 21) /* DSR */
-#define ATMEL_US_DCD (1 << 22) /* DCD */
-#define ATMEL_US_CTS (1 << 23) /* CTS */
+#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
+#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
+#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
+#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
+#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
+#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
+#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
+#define ATMEL_US_FRAME BIT(6) /* Framing Error */
+#define ATMEL_US_PARE BIT(7) /* Parity Error */
+#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
+#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
+#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
+#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
+#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
+#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
+#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
+#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
+#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
+#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
+#define ATMEL_US_RI BIT(20) /* RI */
+#define ATMEL_US_DSR BIT(21) /* DSR */
+#define ATMEL_US_DCD BIT(22) /* DCD */
+#define ATMEL_US_CTS BIT(23) /* CTS */
-#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
-#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
-#define ATMEL_US_CSR 0x14 /* Channel Status Register */
-#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
-#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
-#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */
+#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
+#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
+#define ATMEL_US_CSR 0x14 /* Channel Status Register */
+#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
+#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
+#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
-#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
-#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */
+#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
+#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
-#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */
+#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
+#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
-#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
-#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */
+#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
+#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
-#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
-#define ATMEL_US_NER 0x44 /* Number of Errors Register */
-#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
+#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
+#define ATMEL_US_NER 0x44 /* Number of Errors Register */
+#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
-#define ATMEL_US_NAME 0xf0 /* Ip Name */
-#define ATMEL_US_VERSION 0xfc /* Ip Version */
+#define ATMEL_US_CMPR 0x90 /* Comparison Register */
+#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
+#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
+#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
+#define ATMEL_US_ONE_DATA 0x0
+#define ATMEL_US_TWO_DATA 0x1
+#define ATMEL_US_FOUR_DATA 0x2
+#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
+#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
+#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
+#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
+
+#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
+#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
+#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
+
+#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
+#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
+#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
+#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
+#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
+#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
+#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
+#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
+#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
+#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
+#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
+#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
+#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
+#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
+
+#define ATMEL_US_NAME 0xf0 /* Ip Name */
+#define ATMEL_US_VERSION 0xfc /* Ip Version */
#endif
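With the masks converted to BIT()/GENMASK(), register values compose by OR-ing the field encodings. A sketch of programming 8N1 plus the new FIFO thresholds (the function itself is illustrative; only the masks come from this header):

static void example_atmel_usart_setup(void __iomem *base)
{
	u32 mr = ATMEL_US_USMODE_NORMAL | ATMEL_US_CHRL_8 |
		 ATMEL_US_PAR_NONE | ATMEL_US_NBSTOP_1;		/* 8N1 */
	u32 fmr = ATMEL_US_TXRDYM(ATMEL_US_ONE_DATA) |
		  ATMEL_US_RXRDYM(ATMEL_US_ONE_DATA) |
		  ATMEL_US_TXFTHRES(16) | ATMEL_US_RXFTHRES(16);

	writel(mr, base + ATMEL_US_MR);
	writel(fmr, base + ATMEL_US_FMR);
	writel(ATMEL_US_FIFOEN | ATMEL_US_TXEN | ATMEL_US_RXEN,
	       base + ATMEL_US_CR);
}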
diff --git a/kernel/include/linux/atmel_tc.h b/kernel/include/linux/atmel_tc.h
index b87c1c7c2..468fdfa64 100644
--- a/kernel/include/linux/atmel_tc.h
+++ b/kernel/include/linux/atmel_tc.h
@@ -67,6 +67,7 @@ struct atmel_tc {
const struct atmel_tcb_config *tcb_config;
int irq[3];
struct clk *clk[3];
+ struct clk *slow_clk;
struct list_head node;
bool allocated;
};
diff --git a/kernel/include/linux/atomic.h b/kernel/include/linux/atomic.h
index 5b08a8540..301de78d6 100644
--- a/kernel/include/linux/atomic.h
+++ b/kernel/include/linux/atomic.h
@@ -2,6 +2,426 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+#define __atomic_op_release(op, args...) \
+({ \
+ smp_mb__before_atomic(); \
+ op##_relaxed(args); \
+})
+
+#define __atomic_op_fence(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret; \
+ smp_mb__before_atomic(); \
+ __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_relaxed atomic_add_return
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define atomic_add_return_acquire(...) \
+ __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define atomic_add_return_release(...) \
+ __atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define atomic_add_return(...) \
+ __atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_inc_return_relaxed */
+#ifndef atomic_inc_return_relaxed
+#define atomic_inc_return_relaxed atomic_inc_return
+#define atomic_inc_return_acquire atomic_inc_return
+#define atomic_inc_return_release atomic_inc_return
+
+#else /* atomic_inc_return_relaxed */
+
+#ifndef atomic_inc_return_acquire
+#define atomic_inc_return_acquire(...) \
+ __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_inc_return_release
+#define atomic_inc_return_release(...) \
+ __atomic_op_release(atomic_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(...) \
+ __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic_inc_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return
+#define atomic_sub_return_acquire atomic_sub_return
+#define atomic_sub_return_release atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define atomic_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define atomic_sub_return_release(...) \
+ __atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define atomic_sub_return(...) \
+ __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_dec_return_relaxed */
+#ifndef atomic_dec_return_relaxed
+#define atomic_dec_return_relaxed atomic_dec_return
+#define atomic_dec_return_acquire atomic_dec_return
+#define atomic_dec_return_release atomic_dec_return
+
+#else /* atomic_dec_return_relaxed */
+
+#ifndef atomic_dec_return_acquire
+#define atomic_dec_return_acquire(...) \
+ __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_dec_return_release
+#define atomic_dec_return_release(...) \
+ __atomic_op_release(atomic_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(...) \
+ __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic_dec_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed atomic_xchg
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...) \
+ __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...) \
+ __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define atomic_xchg(...) \
+ __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...) \
+ __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...) \
+ __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed atomic64_add_return
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...) \
+ __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...) \
+ __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_inc_return_relaxed */
+#ifndef atomic64_inc_return_relaxed
+#define atomic64_inc_return_relaxed atomic64_inc_return
+#define atomic64_inc_return_acquire atomic64_inc_return
+#define atomic64_inc_return_release atomic64_inc_return
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+#define atomic64_inc_return_acquire(...) \
+ __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return_release
+#define atomic64_inc_return_release(...) \
+ __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(...) \
+ __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_inc_return_relaxed */
+
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...) \
+ __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...) \
+ __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_dec_return_relaxed */
+#ifndef atomic64_dec_return_relaxed
+#define atomic64_dec_return_relaxed atomic64_dec_return
+#define atomic64_dec_return_acquire atomic64_dec_return
+#define atomic64_dec_return_release atomic64_dec_return
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+#define atomic64_dec_return_acquire(...) \
+ __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return_release
+#define atomic64_dec_return_release(...) \
+ __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(...) \
+ __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_dec_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed atomic64_xchg
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...) \
+ __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...) \
+ __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...) \
+ __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed cmpxchg
+#define cmpxchg_acquire cmpxchg
+#define cmpxchg_release cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+ __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+ __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+ __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed cmpxchg64
+#define cmpxchg64_acquire cmpxchg64
+#define cmpxchg64_release cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+ __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+ __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+ __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define xchg_relaxed xchg
+#define xchg_acquire xchg
+#define xchg_release xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
/**
* atomic_add_unless - add unless the number is already a given value
@@ -28,6 +448,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+ atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_or(mask, v);
+}
+
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
@@ -111,21 +548,17 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
- int old;
- int new;
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
- do {
- old = atomic_read(v);
- new = old | i;
- } while (atomic_cmpxchg(v, old, new) != old);
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+ atomic64_and(~i, v);
}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
+#endif
#include <asm-generic/atomic-long.h>
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
+
#endif /* _LINUX_ATOMIC_H */
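To see how the _acquire/_release variants pair up in practice, here is a minimal test-and-set lock sketch built only on the operations defined above (the lock type and helper names are hypothetical):

struct example_lock { atomic_t locked; };

static inline void example_lock_acquire(struct example_lock *l)
{
	/* a failed cmpxchg_acquire implies no ordering, so just retry */
	while (atomic_cmpxchg_acquire(&l->locked, 0, 1) != 0)
		cpu_relax();
}

static inline void example_lock_release(struct example_lock *l)
{
	/* RELEASE applies to the store: prior accesses stay before it */
	atomic_set_release(&l->locked, 0);
}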
diff --git a/kernel/include/linux/audit.h b/kernel/include/linux/audit.h
index c2e7e3a83..20eba1eb0 100644
--- a/kernel/include/linux/audit.h
+++ b/kernel/include/linux/audit.h
@@ -27,6 +27,9 @@
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
+#define AUDIT_INO_UNSET ((unsigned long)-1)
+#define AUDIT_DEV_UNSET ((dev_t)-1)
+
struct audit_sig_info {
uid_t uid;
pid_t pid;
@@ -59,6 +62,7 @@ struct audit_krule {
struct audit_field *inode_f; /* quick access to an inode field */
struct audit_watch *watch; /* associated watch */
struct audit_tree *tree; /* associated watched tree */
+ struct audit_fsnotify_mark *exe;
struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
struct list_head list; /* for AUDIT_LIST* purposes only */
u64 prio;
@@ -139,7 +143,7 @@ extern void __audit_inode_child(const struct inode *parent,
extern void __audit_seccomp(unsigned long syscall, long signr, int code);
extern void __audit_ptrace(struct task_struct *t);
-static inline int audit_dummy_context(void)
+static inline bool audit_dummy_context(void)
{
void *p = current->audit_context;
return !p || *(int *)p;
@@ -341,9 +345,9 @@ static inline void audit_syscall_entry(int major, unsigned long a0,
{ }
static inline void audit_syscall_exit(void *pt_regs)
{ }
-static inline int audit_dummy_context(void)
+static inline bool audit_dummy_context(void)
{
- return 1;
+ return true;
}
static inline struct filename *audit_reusename(const __user char *name)
{
@@ -453,7 +457,7 @@ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp
extern __printf(2, 3)
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
extern void audit_log_end(struct audit_buffer *ab);
-extern int audit_string_contains_control(const char *string,
+extern bool audit_string_contains_control(const char *string,
size_t len);
extern void audit_log_n_hex(struct audit_buffer *ab,
const unsigned char *buf,
diff --git a/kernel/include/linux/average.h b/kernel/include/linux/average.h
index c6028fd74..d04aa5828 100644
--- a/kernel/include/linux/average.h
+++ b/kernel/include/linux/average.h
@@ -3,28 +3,43 @@
/* Exponentially weighted moving average (EWMA) */
-/* For more documentation see lib/average.c */
-
-struct ewma {
- unsigned long internal;
- unsigned long factor;
- unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
- unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
- return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight) \
+ struct ewma_##name { \
+ unsigned long internal; \
+ }; \
+ static inline void ewma_##name##_init(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ e->internal = 0; \
+ } \
+ static inline unsigned long \
+ ewma_##name##_read(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ return e->internal >> ilog2(_factor); \
+ } \
+ static inline void ewma_##name##_add(struct ewma_##name *e, \
+ unsigned long val) \
+ { \
+ unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long weight = ilog2(_weight); \
+ unsigned long factor = ilog2(_factor); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ \
+ ACCESS_ONCE(e->internal) = internal ? \
+ (((internal << weight) - internal) + \
+ (val << factor)) >> weight : \
+ (val << factor); \
+ }
#endif /* _LINUX_AVERAGE_H */
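Usage of the new macro looks like this; the "sig" average and its factor/weight of 8/16 are illustrative (both must be powers of two, as the BUILD_BUG_ONs enforce):

DECLARE_EWMA(sig, 8, 16)

struct example_station {
	struct ewma_sig avg_signal;
};

static void example_station_sample(struct example_station *sta,
				   unsigned long sig)
{
	ewma_sig_add(&sta->avg_signal, sig);
	/* ewma_sig_read(&sta->avg_signal) yields the smoothed value;
	 * ewma_sig_init() must have run once beforehand.
	 */
}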
diff --git a/kernel/include/linux/backing-dev-defs.h b/kernel/include/linux/backing-dev-defs.h
new file mode 100644
index 000000000..1b4d69f68
--- /dev/null
+++ b/kernel/include/linux/backing-dev-defs.h
@@ -0,0 +1,259 @@
+#ifndef __LINUX_BACKING_DEV_DEFS_H
+#define __LINUX_BACKING_DEV_DEFS_H
+
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
+#include <linux/flex_proportions.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+struct page;
+struct device;
+struct dentry;
+
+/*
+ * Bits in bdi_writeback.state
+ */
+enum wb_state {
+ WB_registered, /* bdi_register() was done */
+ WB_writeback_running, /* Writeback is in progress */
+ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+};
+
+enum wb_congested_state {
+ WB_async_congested, /* The async (write) queue is getting full */
+ WB_sync_congested, /* The sync queue is getting full */
+};
+
+typedef int (congested_fn)(void *, int);
+
+enum wb_stat_item {
+ WB_RECLAIMABLE,
+ WB_WRITEBACK,
+ WB_DIRTIED,
+ WB_WRITTEN,
+ NR_WB_STAT_ITEMS
+};
+
+#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+/*
+ * For cgroup writeback, multiple wb's may map to the same blkcg. Those
+ * wb's can operate mostly independently but should share the congested
+ * state. To facilitate such sharing, the congested state is tracked using
+ * the following struct which is created on demand, indexed by blkcg ID on
+ * its bdi, and refcounted.
+ */
+struct bdi_writeback_congested {
+ unsigned long state; /* WB_[a]sync_congested flags */
+ atomic_t refcnt; /* nr of attached wb's and blkg */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct backing_dev_info *bdi; /* the associated bdi */
+ int blkcg_id; /* ID of the associated blkcg */
+ struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
+#endif
+};
+
+/*
+ * Each wb (bdi_writeback) can perform writeback operations, is measured
+ * and throttled, independently. Without cgroup writeback, each bdi
+ * (backing_dev_info) is served by its embedded bdi->wb.
+ *
+ * On the default hierarchy, blkcg implicitly enables memcg. This allows
+ * using memcg's page ownership for attributing writeback IOs, and every
+ * memcg - blkcg combination can be served by its own wb by assigning a
+ * dedicated wb to each memcg, which enables isolation across different
+ * cgroups and propagation of IO back pressure down from the IO layer up to
+ * the tasks which are generating the dirty pages to be written back.
+ *
+ * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
+ * refcounted with the number of inodes attached to it, and pins the memcg
+ * and the corresponding blkcg. As the corresponding blkcg for a memcg may
+ * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
+ * is tested for blkcg after lookup and removed from index on mismatch so
+ * that a new wb for the combination can be created.
+ */
+struct bdi_writeback {
+ struct backing_dev_info *bdi; /* our parent bdi */
+
+ unsigned long state; /* Always use atomic bitops on this */
+ unsigned long last_old_flush; /* last old data flush */
+
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+ struct list_head b_dirty_time; /* time stamps are dirty */
+ spinlock_t list_lock; /* protects the b_* lists */
+
+ struct percpu_counter stat[NR_WB_STAT_ITEMS];
+
+ struct bdi_writeback_congested *congested;
+
+ unsigned long bw_time_stamp; /* last time write bw is updated */
+ unsigned long dirtied_stamp;
+ unsigned long written_stamp; /* pages written at bw_time_stamp */
+ unsigned long write_bandwidth; /* the estimated write bandwidth */
+ unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
+
+ /*
+ * The base dirty throttle rate, re-calculated every 200ms.
+ * All the bdi tasks' dirty rate will be curbed under it.
+ * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+ * in small steps and is much more smooth/stable than the latter.
+ */
+ unsigned long dirty_ratelimit;
+ unsigned long balanced_dirty_ratelimit;
+
+ struct fprop_local_percpu completions;
+ int dirty_exceeded;
+
+ spinlock_t work_lock; /* protects work_list & dwork scheduling */
+ struct list_head work_list;
+ struct delayed_work dwork; /* work item used for writeback */
+
+ struct list_head bdi_node; /* anchored at bdi->wb_list */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct percpu_ref refcnt; /* used only for !root wb's */
+ struct fprop_local_percpu memcg_completions;
+ struct cgroup_subsys_state *memcg_css; /* the associated memcg */
+ struct cgroup_subsys_state *blkcg_css; /* and blkcg */
+ struct list_head memcg_node; /* anchored at memcg->cgwb_list */
+ struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+
+ union {
+ struct work_struct release_work;
+ struct rcu_head rcu;
+ };
+#endif
+};
+
+struct backing_dev_info {
+ struct list_head bdi_list;
+ unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned int capabilities; /* Device capabilities */
+ congested_fn *congested_fn; /* Function pointer if device is md/dm */
+ void *congested_data; /* Pointer to aux data for congested func */
+
+ char *name;
+
+ unsigned int min_ratio;
+ unsigned int max_ratio, max_prop_frac;
+
+ /*
+ * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
+ * any dirty wbs, which is depended upon by bdi_has_dirty_io().
+ */
+ atomic_long_t tot_write_bandwidth;
+
+ struct bdi_writeback wb; /* the root writeback info for this bdi */
+ struct list_head wb_list; /* list of all wbs */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+ struct rb_root cgwb_congested_tree; /* their congested states */
+ atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
+#else
+ struct bdi_writeback_congested *wb_congested;
+#endif
+ wait_queue_head_t wb_waitq;
+
+ struct device *dev;
+
+ struct timer_list laptop_mode_wb_timer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_dir;
+ struct dentry *debug_stats;
+#endif
+};
+
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
+void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
+void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+ clear_wb_congested(bdi->wb.congested, sync);
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+ set_wb_congested(bdi->wb.congested, sync);
+}
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * wb_tryget - try to increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ return percpu_ref_tryget(&wb->refcnt);
+ return true;
+}
+
+/**
+ * wb_get - increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline void wb_get(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ percpu_ref_get(&wb->refcnt);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ percpu_ref_put(&wb->refcnt);
+}
+
+/**
+ * wb_dying - is a wb dying?
+ * @wb: bdi_writeback of interest
+ *
+ * Returns whether @wb is unlinked and being drained.
+ */
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+ return percpu_ref_is_dying(&wb->refcnt);
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+ return true;
+}
+
+static inline void wb_get(struct bdi_writeback *wb)
+{
+}
+
+static inline void wb_put(struct bdi_writeback *wb)
+{
+}
+
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+ return false;
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
+#endif /* __LINUX_BACKING_DEV_DEFS_H */
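The refcounting helpers above are meant to bracket any use of a cgroup wb that can outlive the caller's locking. A usage sketch (the calling context is hypothetical; wb_has_dirty_io() and wb_start_background_writeback() are declared in backing-dev.h below):

static void example_kick_wb(struct bdi_writeback *wb)
{
	if (!wb_tryget(wb))		/* always succeeds for the root wb */
		return;

	if (wb_has_dirty_io(wb) && !wb_dying(wb))
		wb_start_background_writeback(wb);

	wb_put(wb);
}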
diff --git a/kernel/include/linux/backing-dev.h b/kernel/include/linux/backing-dev.h
index d87d8eced..c82794f20 100644
--- a/kernel/include/linux/backing-dev.h
+++ b/kernel/include/linux/backing-dev.h
@@ -8,206 +8,120 @@
#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H
-#include <linux/percpu_counter.h>
-#include <linux/log2.h>
-#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/timer.h>
+#include <linux/blkdev.h>
#include <linux/writeback.h>
-#include <linux/atomic.h>
-#include <linux/sysctl.h>
-#include <linux/workqueue.h>
-
-struct page;
-struct device;
-struct dentry;
-
-/*
- * Bits in backing_dev_info.state
- */
-enum bdi_state {
- BDI_async_congested, /* The async (write) queue is getting full */
- BDI_sync_congested, /* The sync queue is getting full */
- BDI_registered, /* bdi_register() was done */
- BDI_writeback_running, /* Writeback is in progress */
-};
-
-typedef int (congested_fn)(void *, int);
-
-enum bdi_stat_item {
- BDI_RECLAIMABLE,
- BDI_WRITEBACK,
- BDI_DIRTIED,
- BDI_WRITTEN,
- NR_BDI_STAT_ITEMS
-};
-
-#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-struct bdi_writeback {
- struct backing_dev_info *bdi; /* our parent bdi */
-
- unsigned long last_old_flush; /* last old data flush */
-
- struct delayed_work dwork; /* work item used for writeback */
- struct list_head b_dirty; /* dirty inodes */
- struct list_head b_io; /* parked for writeback */
- struct list_head b_more_io; /* parked for more writeback */
- struct list_head b_dirty_time; /* time stamps are dirty */
- spinlock_t list_lock; /* protects the b_* lists */
-};
-
-struct backing_dev_info {
- struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
- unsigned long state; /* Always use atomic bitops on this */
- unsigned int capabilities; /* Device capabilities */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
-
- char *name;
-
- struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
-
- unsigned long bw_time_stamp; /* last time write bw is updated */
- unsigned long dirtied_stamp;
- unsigned long written_stamp; /* pages written at bw_time_stamp */
- unsigned long write_bandwidth; /* the estimated write bandwidth */
- unsigned long avg_write_bandwidth; /* further smoothed write bw */
-
- /*
- * The base dirty throttle rate, re-calculated on every 200ms.
- * All the bdi tasks' dirty rate will be curbed under it.
- * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
- * in small steps and is much more smooth/stable than the latter.
- */
- unsigned long dirty_ratelimit;
- unsigned long balanced_dirty_ratelimit;
-
- struct fprop_local_percpu completions;
- int dirty_exceeded;
-
- unsigned int min_ratio;
- unsigned int max_ratio, max_prop_frac;
-
- struct bdi_writeback wb; /* default writeback info for this bdi */
- spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
-
- struct list_head work_list;
-
- struct device *dev;
-
- struct timer_list laptop_mode_wb_timer;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debug_dir;
- struct dentry *debug_stats;
-#endif
-};
-
-struct backing_dev_info *inode_to_bdi(struct inode *inode);
+#include <linux/blk-cgroup.h>
+#include <linux/backing-dev-defs.h>
+#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_destroy(struct backing_dev_info *bdi);
+void bdi_exit(struct backing_dev_info *bdi);
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+void bdi_unregister(struct backing_dev_info *bdi);
+
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
- enum wb_reason reason);
-void bdi_start_background_writeback(struct backing_dev_info *bdi);
-void bdi_writeback_workfn(struct work_struct *work);
-int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+void bdi_destroy(struct backing_dev_info *bdi);
+
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+ bool range_cyclic, enum wb_reason reason);
+void wb_start_background_writeback(struct bdi_writeback *wb);
+void wb_workfn(struct work_struct *work);
+void wb_wakeup_delayed(struct bdi_writeback *wb);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
- return !list_empty(&wb->b_dirty) ||
- !list_empty(&wb->b_io) ||
- !list_empty(&wb->b_more_io);
+ return test_bit(WB_has_dirty_io, &wb->state);
}
-static inline void __add_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item, s64 amount)
+static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
- __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
+ /*
+ * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
+ * any dirty wbs. See wb_update_write_bandwidth().
+ */
+ return atomic_long_read(&bdi->tot_write_bandwidth);
+}
+
+static inline void __add_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item, s64 amount)
+{
+ __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void __inc_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- __add_bdi_stat(bdi, item, 1);
+ __add_wb_stat(wb, item, 1);
}
-static inline void inc_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
- __inc_bdi_stat(bdi, item);
+ __inc_wb_stat(wb, item);
local_irq_restore(flags);
}
-static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void __dec_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- __add_bdi_stat(bdi, item, -1);
+ __add_wb_stat(wb, item, -1);
}
-static inline void dec_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
- __dec_bdi_stat(bdi, item);
+ __dec_wb_stat(wb, item);
local_irq_restore(flags);
}
-static inline s64 bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- return percpu_counter_read_positive(&bdi->bdi_stat[item]);
+ return percpu_counter_read_positive(&wb->stat[item]);
}
-static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
+ return percpu_counter_sum_positive(&wb->stat[item]);
}
-static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
s64 sum;
unsigned long flags;
local_irq_save(flags);
- sum = __bdi_stat_sum(bdi, item);
+ sum = __wb_stat_sum(wb, item);
local_irq_restore(flags);
return sum;
}
-extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
* maximal error of a stat counter.
*/
-static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
+static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
- return nr_cpu_ids * BDI_STAT_BATCH;
+ return nr_cpu_ids * WB_STAT_BATCH;
#else
return 1;
#endif
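A sketch of reading the per-wb counters through the renamed helpers (the function itself is illustrative):

static void example_dump_wb_stats(struct bdi_writeback *wb)
{
	s64 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
	s64 writeback   = wb_stat(wb, WB_WRITEBACK);

	/* per-CPU counters may drift by up to wb_stat_error() */
	pr_info("reclaimable=%lld writeback=%lld (+/- %lu)\n",
		reclaimable, writeback, wb_stat_error(wb));
}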
@@ -231,50 +145,57 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_WRITEBACK: Don't write pages back
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
+ *
+ * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
#define BDI_CAP_NO_ACCT_WB 0x00000004
#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010
+#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
extern struct backing_dev_info noop_backing_dev_info;
-int writeback_in_progress(struct backing_dev_info *bdi);
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @wb: bdi_writeback of interest
+ *
+ * Determine whether there is writeback waiting to be handled against a
+ * bdi_writeback.
+ */
+static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, bdi_bits);
- return (bdi->state & bdi_bits);
+ return test_bit(WB_writeback_running, &wb->state);
}
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
- return bdi_congested(bdi, 1 << BDI_sync_congested);
-}
+ struct super_block *sb;
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << BDI_async_congested);
+ if (!inode)
+ return &noop_backing_dev_info;
+
+ sb = inode->i_sb;
+#ifdef CONFIG_BLOCK
+ if (sb_is_blkdev_sb(sb))
+ return blk_get_backing_dev_info(I_BDEV(inode));
+#endif
+ return sb->s_bdi;
}
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
- return bdi_congested(bdi, (1 << BDI_sync_congested) |
- (1 << BDI_async_congested));
-}
+ struct backing_dev_info *bdi = wb->bdi;
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
+ if (bdi->congested_fn)
+ return bdi->congested_fn(bdi->congested_data, cong_bits);
+ return wb->congested->state & cong_bits;
+}
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
-void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
@@ -318,4 +239,279 @@ static inline int bdi_sched_wait(void *word)
return 0;
}
-#endif /* _LINUX_BACKING_DEV_H */
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
+void wb_congested_put(struct bdi_writeback_congested *congested);
+struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
+ struct cgroup_subsys_state *memcg_css,
+ gfp_t gfp);
+void wb_memcg_offline(struct mem_cgroup *memcg);
+void wb_blkcg_offline(struct blkcg *blkcg);
+int inode_congested(struct inode *inode, int cong_bits);
+
+/**
+ * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
+ * @inode: inode of interest
+ *
+ * cgroup writeback requires support from both the bdi and filesystem.
+ * Also, both memcg and iocg have to be on the default hierarchy. Test
+ * whether all conditions are met.
+ *
+ * Note that the test result may change dynamically on the same inode
+ * depending on how memcg and iocg are configured.
+ */
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+ cgroup_subsys_on_dfl(io_cgrp_subsys) &&
+ bdi_cap_account_dirty(bdi) &&
+ (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB);
+}
+
+/**
+ * wb_find_current - find wb for %current on a bdi
+ * @bdi: bdi of interest
+ *
+ * Find the wb of @bdi which matches both the memcg and blkcg of %current.
+ * Must be called under rcu_read_lock() which protects the returned wb.
+ * NULL if not found.
+ */
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+ struct cgroup_subsys_state *memcg_css;
+ struct bdi_writeback *wb;
+
+ memcg_css = task_css(current, memory_cgrp_id);
+ if (!memcg_css->parent)
+ return &bdi->wb;
+
+ wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
+
+ /*
+ * %current's blkcg equals the effective blkcg of its memcg. No
+ * need to use the relatively expensive cgroup_get_e_css().
+ */
+ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
+ return wb;
+ return NULL;
+}
+
+/**
+ * wb_get_create_current - get or create wb for %current on a bdi
+ * @bdi: bdi of interest
+ * @gfp: allocation mask
+ *
+ * Equivalent to wb_get_create() on %current's memcg. This function is
+ * called from a relatively hot path and optimizes the common cases using
+ * wb_find_current().
+ */
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+ struct bdi_writeback *wb;
+
+ rcu_read_lock();
+ wb = wb_find_current(bdi);
+ if (wb && unlikely(!wb_tryget(wb)))
+ wb = NULL;
+ rcu_read_unlock();
+
+ if (unlikely(!wb)) {
+ struct cgroup_subsys_state *memcg_css;
+
+ memcg_css = task_get_css(current, memory_cgrp_id);
+ wb = wb_get_create(bdi, memcg_css, gfp);
+ css_put(memcg_css);
+ }
+ return wb;
+}
+
+/**
+ * inode_to_wb_is_valid - test whether an inode has a wb associated
+ * @inode: inode of interest
+ *
+ * Returns %true if @inode has a wb associated. May be called without any
+ * locking.
+ */
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return inode->i_wb;
+}
+
+/**
+ * inode_to_wb - determine the wb of an inode
+ * @inode: inode of interest
+ *
+ * Returns the wb @inode is currently associated with. The caller must be
+ * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * associated wb's list_lock.
+ */
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(debug_locks &&
+ (!lockdep_is_held(&inode->i_lock) &&
+ !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_wb->list_lock)));
+#endif
+ return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
+ * @inode: target inode
+ * @lockedp: temp bool output param, to be passed to the end function
+ *
+ * The caller wants to access the wb associated with @inode but isn't
+ * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
+ * function determines the wb associated with @inode and ensures that the
+ * association doesn't change until the transaction is finished with
+ * unlocked_inode_to_wb_end().
+ *
+ * The caller must call unlocked_inode_to_wb_end() with *@lockedp
+ * afterwards and can't sleep during the transaction. IRQs may or may not
+ * be disabled on return.
+ */
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+ rcu_read_lock();
+
+ /*
+ * Paired with store_release in inode_switch_wb_work_fn() and
+ * ensures that we see the new wb if we see cleared I_WB_SWITCH.
+ */
+ *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+
+ if (unlikely(*lockedp))
+ spin_lock_irq(&inode->i_mapping->tree_lock);
+
+	/*
+	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+	 * inode_to_wb()'s lockdep check would trigger here, so dereference
+	 * i_wb directly.
+	 */
+ return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_end - end inode wb access transaction
+ * @inode: target inode
+ * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ */
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+ if (unlikely(locked))
+ spin_unlock_irq(&inode->i_mapping->tree_lock);
+
+ rcu_read_unlock();
+}
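
As an illustration only — a minimal, hypothetical caller of the transaction pair (the function name and printed field are placeholders, not part of the patch):

	static void my_inspect_wb(struct inode *inode)
	{
		struct bdi_writeback *wb;
		bool locked;

		wb = unlocked_inode_to_wb_begin(inode, &locked);
		/* the inode->wb association is stable here; must not sleep */
		pr_debug("inode %lu -> wb %p\n", inode->i_ino, wb);
		unlocked_inode_to_wb_end(inode, locked);
	}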
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+ return false;
+}
+
+static inline struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
+{
+ atomic_inc(&bdi->wb_congested->refcnt);
+ return bdi->wb_congested;
+}
+
+static inline void wb_congested_put(struct bdi_writeback_congested *congested)
+{
+ if (atomic_dec_and_test(&congested->refcnt))
+ kfree(congested);
+}
+
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+ return &bdi->wb;
+}
+
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+ return &bdi->wb;
+}
+
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return true;
+}
+
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+ return &inode_to_bdi(inode)->wb;
+}
+
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+ return inode_to_wb(inode);
+}
+
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+}
+
+static inline void wb_memcg_offline(struct mem_cgroup *memcg)
+{
+}
+
+static inline void wb_blkcg_offline(struct blkcg *blkcg)
+{
+}
+
+static inline int inode_congested(struct inode *inode, int cong_bits)
+{
+ return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
+static inline int inode_read_congested(struct inode *inode)
+{
+ return inode_congested(inode, 1 << WB_sync_congested);
+}
+
+static inline int inode_write_congested(struct inode *inode)
+{
+ return inode_congested(inode, 1 << WB_async_congested);
+}
+
+static inline int inode_rw_congested(struct inode *inode)
+{
+ return inode_congested(inode, (1 << WB_sync_congested) |
+ (1 << WB_async_congested));
+}
+
+static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
+{
+ return wb_congested(&bdi->wb, cong_bits);
+}
+
+static inline int bdi_read_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << WB_sync_congested);
+}
+
+static inline int bdi_write_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << WB_async_congested);
+}
+
+static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, (1 << WB_sync_congested) |
+ (1 << WB_async_congested));
+}
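
Typical callers use these predicates to back off under congestion; a hedged sketch (not from the patch):

	/* skip optional readahead when the device is read-congested */
	if (inode_read_congested(inode))
		return;

	/* or throttle a writer until the async (write) side drains */
	while (bdi_write_congested(bdi))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);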
+
+#endif /* _LINUX_BACKING_DEV_H */
diff --git a/kernel/include/linux/backlight.h b/kernel/include/linux/backlight.h
index adb14a861..1e7a69adb 100644
--- a/kernel/include/linux/backlight.h
+++ b/kernel/include/linux/backlight.h
@@ -117,12 +117,16 @@ struct backlight_device {
int use_count;
};
-static inline void backlight_update_status(struct backlight_device *bd)
+static inline int backlight_update_status(struct backlight_device *bd)
{
+ int ret = -ENOENT;
+
mutex_lock(&bd->update_lock);
if (bd->ops && bd->ops->update_status)
- bd->ops->update_status(bd);
+ ret = bd->ops->update_status(bd);
mutex_unlock(&bd->update_lock);
+
+ return ret;
}
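
With the void-to-int change, callers can now surface driver failures; a hypothetical sketch:

	int err = backlight_update_status(bd);

	if (err)
		dev_warn(&bd->dev, "failed to update backlight status: %d\n", err);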
extern struct backlight_device *backlight_device_register(const char *name,
diff --git a/kernel/include/linux/basic_mmio_gpio.h b/kernel/include/linux/basic_mmio_gpio.h
index 0e97856b2..ed3768f4e 100644
--- a/kernel/include/linux/basic_mmio_gpio.h
+++ b/kernel/include/linux/basic_mmio_gpio.h
@@ -74,5 +74,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
+#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/kernel/include/linux/bcm47xx_nvram.h b/kernel/include/linux/bcm47xx_nvram.h
index b12b07e75..2793652fb 100644
--- a/kernel/include/linux/bcm47xx_nvram.h
+++ b/kernel/include/linux/bcm47xx_nvram.h
@@ -10,11 +10,17 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/vmalloc.h>
-#ifdef CONFIG_BCM47XX
+#ifdef CONFIG_BCM47XX_NVRAM
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
+char *bcm47xx_nvram_get_contents(size_t *val_len);
+static inline void bcm47xx_nvram_release_contents(char *nvram)
+{
+ vfree(nvram);
+};
#else
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
@@ -29,6 +35,15 @@ static inline int bcm47xx_nvram_gpio_pin(const char *name)
{
return -ENOTSUPP;
};
+
+static inline char *bcm47xx_nvram_get_contents(size_t *val_len)
+{
+ return NULL;
+};
+
+static inline void bcm47xx_nvram_release_contents(char *nvram)
+{
+};
#endif
#endif /* __BCM47XX_NVRAM_H */
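
A hedged sketch of a consumer of the new contents API (variable names are placeholders):

	size_t len;
	char *nvram = bcm47xx_nvram_get_contents(&len);

	if (nvram) {
		/* ... parse len bytes of raw NVRAM ... */
		bcm47xx_nvram_release_contents(nvram);
	}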
diff --git a/kernel/include/linux/bcma/bcma.h b/kernel/include/linux/bcma/bcma.h
index e34f90664..3feb1b2d7 100644
--- a/kernel/include/linux/bcma/bcma.h
+++ b/kernel/include/linux/bcma/bcma.h
@@ -151,6 +151,8 @@ struct bcma_host_ops {
#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */
#define BCMA_CORE_USB30_DEV 0x83D
#define BCMA_CORE_ARM_CR4 0x83E
+#define BCMA_CORE_ARM_CA7 0x847
+#define BCMA_CORE_SYS_MEM 0x849
#define BCMA_CORE_DEFAULT 0xFFF
#define BCMA_MAX_NR_CORES 16
@@ -305,6 +307,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
extern void bcma_driver_unregister(struct bcma_driver *drv);
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_bcma_driver(__bcma_driver) \
+ module_driver(__bcma_driver, bcma_driver_register, \
+ bcma_driver_unregister)
+
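A hypothetical driver using the new helper; every my_* identifier below is a placeholder:

	static struct bcma_driver my_bcma_driver = {
		.name		= KBUILD_MODNAME,
		.id_table	= my_bcma_id_table,
		.probe		= my_probe,
		.remove		= my_remove,
	};
	module_bcma_driver(my_bcma_driver);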
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int bcma_arch_register_fallback_sprom(
diff --git a/kernel/include/linux/bcma/bcma_driver_chipcommon.h b/kernel/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf65..cf038431a 100644
--- a/kernel/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/kernel/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
- struct irq_domain *irq_domain;
#endif
};
diff --git a/kernel/include/linux/bcma/bcma_driver_pci.h b/kernel/include/linux/bcma/bcma_driver_pci.h
index 5ba6918ca..9657f11d4 100644
--- a/kernel/include/linux/bcma/bcma_driver_pci.h
+++ b/kernel/include/linux/bcma/bcma_driver_pci.h
@@ -246,7 +246,18 @@ static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
}
#endif
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+#else
+static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
+{
+ return -ENOTSUPP;
+}
+static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/kernel/include/linux/bio.h b/kernel/include/linux/bio.h
index da3a127c9..fbe47bc70 100644
--- a/kernel/include/linux/bio.h
+++ b/kernel/include/linux/bio.h
@@ -187,17 +187,6 @@ static inline void *bio_data(struct bio *bio)
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
-{
- return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
-}
-
-#define bio_io_error(bio) bio_endio((bio), -EIO)
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
@@ -290,7 +279,68 @@ static inline unsigned bio_segments(struct bio *bio)
 * returns; the bio would then already be freed when the
 * if (bio->bi_flags ...) check runs
*/
-#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+static inline void bio_get(struct bio *bio)
+{
+ bio->bi_flags |= (1 << BIO_REFFED);
+ smp_mb__before_atomic();
+ atomic_inc(&bio->__bi_cnt);
+}
+
+static inline void bio_cnt_set(struct bio *bio, unsigned int count)
+{
+ if (count != 1) {
+ bio->bi_flags |= (1 << BIO_REFFED);
+ smp_mb__before_atomic();
+ }
+ atomic_set(&bio->__bi_cnt, count);
+}
+
+static inline bool bio_flagged(struct bio *bio, unsigned int bit)
+{
+ return (bio->bi_flags & (1U << bit)) != 0;
+}
+
+static inline void bio_set_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags |= (1U << bit);
+}
+
+static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags &= ~(1U << bit);
+}
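
These accessors replace open-coded bit twiddling on bi_flags; for example (sketch):

	/* recount physical segments only when the cached state is stale */
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	/* suppress error reporting for this bio */
	bio_set_flag(bio, BIO_QUIET);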
+
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+ if (unlikely(!bio_multiple_segments(bio))) {
+ *bv = bio_iovec(bio);
+ return;
+ }
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
+ if (!iter.bi_bvec_done)
+ idx = iter.bi_idx - 1;
+ else /* in the middle of bvec */
+ idx = iter.bi_idx;
+
+ *bv = bio->bi_io_vec[idx];
+
+ /*
	 * iter.bi_bvec_done records the actual length of the last bvec
+ * if this bio ends in the middle of one io vector
+ */
+ if (iter.bi_bvec_done)
+ bv->bv_len = iter.bi_bvec_done;
+}
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
@@ -412,8 +462,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
-extern void bio_endio(struct bio *, int);
-extern void bio_endio_nodec(struct bio *, int);
+extern void bio_endio(struct bio *);
+
+static inline void bio_io_error(struct bio *bio)
+{
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+}
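
Under the new single-argument bio_endio(), completion status travels in bi_error; a hypothetical driver completion path:

	static void my_complete(struct bio *bio, int err)
	{
		bio->bi_error = err;	/* 0 on success, -Exxx on failure */
		bio_endio(bio);		/* no error argument anymore */
	}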
+
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -427,7 +483,6 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
-extern int bio_get_nr_vecs(struct block_device *);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
const struct iov_iter *, gfp_t);
@@ -469,9 +524,12 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_blkcg(struct bio *bio,
+ struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif /* CONFIG_BLK_CGROUP */
@@ -701,7 +759,7 @@ extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
-extern void bio_integrity_endio(struct bio *, int);
+extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
diff --git a/kernel/include/linux/bitmap.h b/kernel/include/linux/bitmap.h
index ea17cca9e..9653fdb76 100644
--- a/kernel/include/linux/bitmap.h
+++ b/kernel/include/linux/bitmap.h
@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/kernel/include/linux/bitops.h b/kernel/include/linux/bitops.h
index 297f5bda4..defeaac07 100644
--- a/kernel/include/linux/bitops.h
+++ b/kernel/include/linux/bitops.h
@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-static __inline__ int get_bitmask_order(unsigned int count)
+static inline int get_bitmask_order(unsigned int count)
{
int order;
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
-static __inline__ int get_count_order(unsigned int count)
+static inline int get_count_order(unsigned int count)
{
int order;
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
return order;
}
-static inline unsigned long hweight_long(unsigned long w)
+static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
- return (word << shift) | (word >> (32 - shift));
+ return (word << shift) | (word >> ((-shift) & 31));
}
/**
@@ -164,6 +164,8 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
+ *
+ * This is safe to use for 16- and 8-bit types as well.
*/
static inline __s32 sign_extend32(__u32 value, int index)
{
@@ -171,6 +173,17 @@ static inline __s32 sign_extend32(__u32 value, int index)
return (__s32)(value << shift) >> shift;
}
+/**
+ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<64) to sign bit
+ */
+static inline __s64 sign_extend64(__u64 value, int index)
+{
+ __u8 shift = 63 - index;
+ return (__s64)(value << shift) >> shift;
+}
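
A worked, hypothetical use: decoding a 10-bit two's-complement hardware field:

	static inline s32 decode_field(u32 reg)
	{
		return sign_extend32(reg & 0x3ff, 9);	/* bit 9 is the sign bit */
	}
	/* decode_field(0x3ff) == -1, decode_field(0x1ff) == 511 */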
+
static inline unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
diff --git a/kernel/include/linux/blk-cgroup.h b/kernel/include/linux/blk-cgroup.h
new file mode 100644
index 000000000..c02e66994
--- /dev/null
+++ b/kernel/include/linux/blk-cgroup.h
@@ -0,0 +1,786 @@
+#ifndef _BLK_CGROUP_H
+#define _BLK_CGROUP_H
+/*
+ * Common Block IO controller cgroup interface
+ *
+ * Based on ideas and code from CFQ, CFS and BFQ:
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ *
+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
+ * Paolo Valente <paolo.valente@unimore.it>
+ *
+ * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
+ * Nauman Rafique <nauman@google.com>
+ */
+
+#include <linux/cgroup.h>
+#include <linux/percpu_counter.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/blkdev.h>
+#include <linux/atomic.h>
+
+/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
+#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+
+/* Max limits for throttle policy */
+#define THROTL_IOPS_MAX UINT_MAX
+
+#ifdef CONFIG_BLK_CGROUP
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+struct blkcg_gq;
+
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
+
+ struct list_head all_blkcgs_node;
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+#endif
+};
+
+/*
+ * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
+ * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
+ * to carry result values from read and sum operations.
+ */
+struct blkg_stat {
+ struct percpu_counter cpu_cnt;
+ atomic64_t aux_cnt;
+};
+
+struct blkg_rwstat {
+ struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
+ atomic64_t aux_cnt[BLKG_RWSTAT_NR];
+};
+
+/*
+ * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each blkg:policy pair is
+ * represented by a blkg_policy_data which is allocated and freed by each
+ * policy's pd_alloc/free_fn() methods. A policy can allocate private data
+ * area by allocating larger data structure which embeds blkg_policy_data
+ * at the beginning.
+ */
+struct blkg_policy_data {
+ /* the blkg and policy id this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+ int plid;
+};
+
+/*
+ * Policies that need to keep per-blkcg data which is independent from any
+ * request_queue associated to it should implement cpd_alloc/free_fn()
+ * methods. A policy can allocate private data area by allocating larger
+ * data structure which embeds blkcg_policy_data at the beginning.
+ * cpd_init() is invoked to let each policy handle per-blkcg data.
+ */
+struct blkcg_policy_data {
+ /* the blkcg and policy id this per-policy data belongs to */
+ struct blkcg *blkcg;
+ int plid;
+};
+
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+
+ /*
+ * Each blkg gets congested separately and the congestion state is
+ * propagated to the matching bdi_writeback_congested.
+ */
+ struct bdi_writeback_congested *wb_congested;
+
+ /* all non-root blkcg_gq's are guaranteed to have access to parent */
+ struct blkcg_gq *parent;
+
+ /* request allocation list for this blkcg-q pair */
+ struct request_list rl;
+
+ /* reference count */
+ atomic_t refcnt;
+
+ /* is this blkg online? protected by both blkcg and q locks */
+ bool online;
+
+ struct blkg_rwstat stat_bytes;
+ struct blkg_rwstat stat_ios;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
+};
+
+typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
+typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
+typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
+
+struct blkcg_policy {
+ int plid;
+ /* cgroup files for the policy */
+ struct cftype *dfl_cftypes;
+ struct cftype *legacy_cftypes;
+
+ /* operations */
+ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
+ blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_free_cpd_fn *cpd_free_fn;
+ blkcg_pol_bind_cpd_fn *cpd_bind_fn;
+
+ blkcg_pol_alloc_pd_fn *pd_alloc_fn;
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_online_pd_fn *pd_online_fn;
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
+ blkcg_pol_free_pd_fn *pd_free_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+};
+
+extern struct blkcg blkcg_root;
+extern struct cgroup_subsys_state * const blkcg_root_css;
+
+struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
+ struct request_queue *q, bool update_hint);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
+/* Blkio controller policy registration */
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+
+const char *blkg_dev_name(struct blkcg_gq *blkg);
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+int blkg_print_stat_bytes(struct seq_file *sf, void *v);
+int blkg_print_stat_ios(struct seq_file *sf, void *v);
+int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
+int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
+
+u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ char *body;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+
+
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct blkcg, css) : NULL;
+}
+
+static inline struct blkcg *task_blkcg(struct task_struct *tsk)
+{
+ return css_to_blkcg(task_css(tsk, io_cgrp_id));
+}
+
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_css)
+ return css_to_blkcg(bio->bi_css);
+ return task_blkcg(current);
+}
+
+static inline struct cgroup_subsys_state *
+task_get_blkcg_css(struct task_struct *task)
+{
+ return task_get_css(task, io_cgrp_id);
+}
+
+/**
+ * blkcg_parent - get the parent of a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * Return the parent blkcg of @blkcg. Can be called anytime.
+ */
+static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
+{
+ return css_to_blkcg(blkcg->css.parent);
+}
+
+/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update lookup hint with the result or not
+ *
+ * This is the internal version and shouldn't be used by policy
+ * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
+ * @q's bypass state. If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and the lookup hint is updated on success.
+ */
+static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q,
+ bool update_hint)
+{
+ struct blkcg_gq *blkg;
+
+ if (blkcg == &blkcg_root)
+ return q->root_blkg;
+
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
+
+ return blkg_lookup_slowpath(blkcg, q, update_hint);
+}
+
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q, false);
+}
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return a pointer to the private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
+{
+ return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
+ struct blkcg_policy *pol)
+{
+ return blkcg ? blkcg->cpd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
+
+static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
+{
+ return cpd ? cpd->blkcg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ char *p;
+
+ p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ if (!p) {
+ strncpy(buf, "<unavailable>", buflen);
+ return -ENAMETOOLONG;
+ }
+
+ memmove(buf, p, buf + buflen - p);
+ return 0;
+}
+
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ atomic_inc(&blkg->refcnt);
+}
+
+void __blkg_release_rcu(struct rcu_head *rcu);
+
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ if (atomic_dec_and_test(&blkg->refcnt))
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+}
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
+ * read locked. If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs. The caller may
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Similar to blkg_for_each_descendant_pre() but performs post-order
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
+ */
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio. Find
+ * the request_list to use and obtain a reference on it. Should be called
+ * under queue_lock. This function is guaranteed to return a non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+
+ /* bypass blkg lookup and use @q->root_rl directly for root */
+ if (blkcg == &blkcg_root)
+ goto root_rl;
+
+ /*
+ * Try to use blkg->rl. blkg lookup may fail under memory pressure
+ * or if either the blkcg or queue is going away. Fall back to
+ * root_rl in such cases.
+ */
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg))
+ goto root_rl;
+
+ blkg_get(blkg);
+ rcu_read_unlock();
+ return &blkg->rl;
+root_rl:
+ rcu_read_unlock();
+ return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl(). Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+ if (rl->blkg->blkcg != &blkcg_root)
+ blkg_put(rl->blkg);
+}
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+ rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+ return rq->rl;
+}
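
The four helpers are meant to be used together; a minimal fragment of a sketch, with allocation elided and all variables assumed in scope:

	spin_lock_irq(q->queue_lock);
	rl = blk_get_rl(q, bio);	/* guaranteed non-NULL */
	/* ... allocate rq from rl ... */
	blk_rq_set_rl(rq, rl);		/* record the origin for freeing */
	spin_unlock_irq(q->queue_lock);

	/* later, when the request is freed (again under queue_lock): */
	blk_put_rl(blk_rq_rl(rq));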
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
+static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
+{
+ int ret;
+
+ ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
+ if (ret)
+ return ret;
+
+ atomic64_set(&stat->aux_cnt, 0);
+ return 0;
+}
+
+static inline void blkg_stat_exit(struct blkg_stat *stat)
+{
+ percpu_counter_destroy(&stat->cpu_cnt);
+}
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller must ensure that IRQs on the same CPU
+ * don't re-enter this function for the same counter.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ return percpu_counter_sum_positive(&stat->cpu_cnt);
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ percpu_counter_set(&stat->cpu_cnt, 0);
+ atomic64_set(&stat->aux_cnt, 0);
+}
+
+/**
+ * blkg_stat_add_aux - add a blkg_stat into another's aux count
+ * @to: the destination blkg_stat
+ * @from: the source
+ *
+ * Add @from's count including the aux one to @to's aux count.
+ */
+static inline void blkg_stat_add_aux(struct blkg_stat *to,
+ struct blkg_stat *from)
+{
+ atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
+ &to->aux_cnt);
+}
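
The lifecycle of a blkg_stat in one hypothetical place:

	struct blkg_stat st;
	u64 total;

	if (blkg_stat_init(&st, GFP_KERNEL))
		return -ENOMEM;
	blkg_stat_add(&st, nr_bytes);	/* hot path: cheap per-cpu add */
	total = blkg_stat_read(&st);	/* slow path: sums all CPUs */
	blkg_stat_exit(&st);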
+
+static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
+{
+ int i, ret;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
+ if (ret) {
+ while (--i >= 0)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ return ret;
+ }
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+ return 0;
+}
+
+static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
+{
+ struct percpu_counter *cnt;
+
+ if (rw & REQ_WRITE)
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
+ else
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
+
+ __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+
+ if (rw & REQ_SYNC)
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
+ else
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
+
+ __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+}
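
For example, accounting a 4KiB synchronous write bumps both the WRITE and SYNC counters (sketch):

	blkg_rwstat_add(&blkg->stat_bytes, REQ_WRITE | REQ_SYNC, 4096);
	blkg_rwstat_add(&blkg->stat_ios, REQ_WRITE | REQ_SYNC, 1);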
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it in the aux counts.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat result;
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_set(&result.aux_cnt[i],
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
+ return result;
+}
+
+/**
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+}
+
+/**
+ * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's count including the aux one to @to's aux count.
+ */
+static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ struct blkg_rwstat v = blkg_rwstat_read(from);
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(atomic64_read(&v.aux_cnt[i]) +
+ atomic64_read(&from->aux_cnt[i]),
+ &to->aux_cnt[i]);
+}
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio);
+#else
+static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio) { return false; }
+#endif
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ bool throtl = false;
+
+ rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ throtl = blk_throtl_bio(q, blkg, bio);
+
+ if (!throtl) {
+ blkg = blkg ?: q->root_blkg;
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
+ bio->bi_iter.bi_size);
+ blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
+ }
+
+ rcu_read_unlock();
+ return !throtl;
+}
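
A caller inside a make_request_fn would look roughly like this (sketch):

	if (!blkcg_bio_issue_check(q, bio))
		return BLK_QC_T_NONE;	/* throttled; blk-throttle owns the bio now */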
+
+#else /* CONFIG_BLK_CGROUP */
+
+struct blkcg {
+};
+
+struct blkg_policy_data {
+};
+
+struct blkcg_policy_data {
+};
+
+struct blkcg_gq {
+};
+
+struct blkcg_policy {
+};
+
+#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+
+static inline struct cgroup_subsys_state *
+task_get_blkcg_css(struct task_struct *task)
+{
+ return NULL;
+}
+
+#ifdef CONFIG_BLOCK
+
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+ struct bio *bio) { return true; }
+
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
+#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
diff --git a/kernel/include/linux/blk-mq.h b/kernel/include/linux/blk-mq.h
index e6ff990c9..463df8954 100644
--- a/kernel/include/linux/blk-mq.h
+++ b/kernel/include/linux/blk-mq.h
@@ -59,6 +59,9 @@ struct blk_mq_hw_ctx {
struct blk_mq_cpu_notifier cpu_notifier;
struct kobject kobj;
+
+ unsigned long poll_invoked;
+ unsigned long poll_success;
};
struct blk_mq_tag_set {
@@ -96,6 +99,9 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
+typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
struct blk_mq_ops {
/*
@@ -113,6 +119,11 @@ struct blk_mq_ops {
*/
timeout_fn *timeout;
+ /*
+ * Called to poll for completion of a specific tag.
+ */
+ poll_fn *poll;
+
softirq_done_fn *complete;
/*
@@ -144,7 +155,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_SYSFS_UP = 1 << 3,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -166,7 +176,6 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
-void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
@@ -182,6 +191,7 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -214,7 +224,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -223,7 +233,7 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
diff --git a/kernel/include/linux/blk_types.h b/kernel/include/linux/blk_types.h
index b7299febc..0fb65843e 100644
--- a/kernel/include/linux/blk_types.h
+++ b/kernel/include/linux/blk_types.h
@@ -14,7 +14,7 @@ struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
-typedef void (bio_end_io_t) (struct bio *, int);
+typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
/*
@@ -46,7 +46,8 @@ struct bvec_iter {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- unsigned long bi_flags; /* status, command, etc */
+ unsigned int bi_flags; /* status, command, etc */
+ int bi_error;
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
@@ -65,7 +66,7 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
- atomic_t bi_remaining;
+ atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
@@ -92,7 +93,7 @@ struct bio {
unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
- atomic_t bi_cnt; /* pin count */
+ atomic_t __bi_cnt; /* pin count */
struct bio_vec *bi_io_vec; /* the actual vec list */
@@ -111,17 +112,14 @@ struct bio {
/*
* bio flags
*/
-#define BIO_UPTODATE 0 /* ok after I/O completion */
-#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
-#define BIO_EOF 2 /* out-out-bounds error */
-#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
-#define BIO_CLONED 4 /* doesn't own data */
-#define BIO_BOUNCED 5 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 6 /* contains user pages */
-#define BIO_EOPNOTSUPP 7 /* not supported */
-#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
-#define BIO_QUIET 9 /* Make BIO Quiet */
-#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
+#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
+#define BIO_CLONED 2 /* doesn't own data */
+#define BIO_BOUNCED 3 /* bio is a bounce bio */
+#define BIO_USER_MAPPED 4 /* contains user pages */
+#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
+#define BIO_QUIET 6 /* Make BIO Quiet */
+#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -130,14 +128,12 @@ struct bio {
#define BIO_RESET_BITS 13
#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
-#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
-
/*
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
-#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
@@ -248,4 +244,28 @@ enum rq_flag_bits {
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
+#define BLK_QC_T_SHIFT 16
+
+static inline bool blk_qc_t_valid(blk_qc_t cookie)
+{
+ return cookie != BLK_QC_T_NONE;
+}
+
+static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
+{
+ return tag | (queue_num << BLK_QC_T_SHIFT);
+}
+
+static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
+{
+ return cookie >> BLK_QC_T_SHIFT;
+}
+
+static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
+{
+ return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
+}
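
The cookie simply packs the tag into the low 16 bits and the hardware queue number above them; for example:

	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	/* tag 42, hw queue 3 */
	/* blk_qc_t_to_queue_num(cookie) == 3, blk_qc_t_to_tag(cookie) == 42 */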
+
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/kernel/include/linux/blkdev.h b/kernel/include/linux/blkdev.h
index 37faf63af..a8f18e02b 100644
--- a/kernel/include/linux/blkdev.h
+++ b/kernel/include/linux/blkdev.h
@@ -12,7 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
-#include <linux/backing-dev.h>
+#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
@@ -22,21 +22,20 @@
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
struct module;
struct scsi_ioctl_command;
struct request_queue;
struct elevator_queue;
-struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct pr_ops;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -75,18 +74,7 @@ struct request_list {
enum rq_cmd_type_bits {
REQ_TYPE_FS = 1, /* fs request */
REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_SENSE, /* sense request */
- REQ_TYPE_PM_SUSPEND, /* suspend request */
- REQ_TYPE_PM_RESUME, /* resume request */
- REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
- REQ_TYPE_SPECIAL, /* driver defined type */
- /*
- * for ATA/ATAPI devices. this really doesn't belong here, ide should
- * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
- * private REQ_LB opcodes to differentiate what type of request this is
- */
- REQ_TYPE_ATA_TASKFILE,
- REQ_TYPE_ATA_PC,
+ REQ_TYPE_DRV_PRIV, /* driver defined types from here */
};
#define BLK_MAX_CDB 16
@@ -109,7 +97,7 @@ struct request {
struct blk_mq_ctx *mq_ctx;
u64 cmd_flags;
- enum rq_cmd_type_bits cmd_type;
+ unsigned cmd_type;
unsigned long atomic_flags;
int cpu;
@@ -217,37 +205,16 @@ static inline unsigned short req_get_ioprio(struct request *req)
return req->ioprio;
}
-/*
- * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
- * requests. Some step values could eventually be made generic.
- */
-struct request_pm_state
-{
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
#include <linux/elevator.h>
struct blk_queue_ctx;
typedef void (request_fn_proc) (struct request_queue *q);
-typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
-struct bvec_merge_data {
- struct block_device *bi_bdev;
- sector_t bi_sector;
- unsigned bi_size;
- unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
- struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
@@ -285,8 +252,10 @@ struct blk_queue_tag {
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
+ unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
@@ -295,6 +264,7 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
+ unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -332,7 +302,6 @@ struct request_queue {
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
- merge_bvec_fn *merge_bvec_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
@@ -403,6 +372,10 @@ struct request_queue {
*/
struct kobject mq_kobj;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity integrity;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#ifdef CONFIG_PM
struct device *dev;
int rpm_status;
@@ -470,7 +443,7 @@ struct request_queue {
struct mutex sysfs_lock;
int bypass_depth;
- int mq_freeze_depth;
+ atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
@@ -483,12 +456,15 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
- struct swait_head mq_freeze_wq;
- struct percpu_ref mq_usage_counter;
+ struct swait_queue_head mq_freeze_wq;
+ struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
+ struct bio_set *bio_split;
+
+ bool mq_sysfs_init_done;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -513,7 +489,7 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
+#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -611,10 +587,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
(((rq)->cmd_flags & REQ_STARTED) && \
((rq)->cmd_type == REQ_TYPE_FS))
-#define blk_pm_request(rq) \
- ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
- (rq)->cmd_type == REQ_TYPE_PM_RESUME)
-
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
@@ -622,7 +594,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
+#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
/*
* Driver can handle struct request, if it either has an old style
@@ -792,7 +764,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern void generic_make_request(struct bio *bio);
+extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
@@ -803,7 +775,6 @@ extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -813,6 +784,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+ struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -822,30 +795,15 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-/*
- * A queue has just exitted congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
-{
- clear_bdi_congested(&q->backing_dev_info, sync);
-}
-
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static inline void blk_set_queue_congested(struct request_queue *q, int sync)
-{
- set_bdi_congested(&q->backing_dev_info, sync);
-}
-
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
+extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
+extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
@@ -861,6 +819,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue; /* this is never NULL */
@@ -934,7 +894,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors)
+ if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
return blk_queue_get_max_sectors(q, rq->cmd_flags);
return min(blk_max_size_offset(q, blk_rq_pos(rq)),
@@ -1002,7 +962,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
@@ -1035,9 +994,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1055,6 +1014,7 @@ bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
+extern void blk_set_queue_dying(struct request_queue *);
/*
* block layer runtime pm functions
@@ -1186,6 +1146,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
+ BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
@@ -1202,6 +1163,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
return q->limits.seg_boundary_mask;
}
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+ return q->limits.virt_boundary_mask;
+}
+
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
return q->limits.max_sectors;
@@ -1402,6 +1368,50 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ if (!queue_virt_boundary(q))
+ return false;
+ return __bvec_gap_to_prev(q, bprv, offset);
+}
+
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+ struct bio *next)
+{
+ if (bio_has_data(prev) && queue_virt_boundary(q)) {
+ struct bio_vec pb, nb;
+
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
+
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ }
+
+ return false;
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, bio, req->bio);
+}
+
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
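The mask test added above is worth a worked illustration: a driver that cannot tolerate SG gaps (NVMe-style) sets the virtual boundary to the page size, so a segment may only merge with a predecessor that ends exactly on the boundary, and only if the segment itself starts at offset zero. A minimal userspace sketch of the same arithmetic, assuming a 4 KiB boundary (mask 0xfff):

	#include <stdbool.h>
	#include <stdio.h>

	#define VIRT_BOUNDARY_MASK 0xfffUL  /* stands in for queue_virt_boundary(q) */

	struct vec { unsigned long off, len; };

	/* mirrors __bvec_gap_to_prev() above */
	static bool gap_to_prev(const struct vec *prev, unsigned long next_off)
	{
		return next_off || ((prev->off + prev->len) & VIRT_BOUNDARY_MASK);
	}

	int main(void)
	{
		struct vec full = { 0, 4096 }, partial = { 0, 512 };

		printf("%d\n", gap_to_prev(&full, 0));    /* 0: ends on boundary, next starts at 0 */
		printf("%d\n", gap_to_prev(&partial, 0)); /* 1: previous vec ends mid-page */
		printf("%d\n", gap_to_prev(&full, 64));   /* 1: next vec starts at an offset */
		return 0;
	}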
@@ -1474,22 +1484,13 @@ struct blk_integrity_iter {
typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
-struct blk_integrity {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
-
- unsigned short flags;
- unsigned short tuple_size;
- unsigned short interval;
- unsigned short tag_size;
-
- const char *name;
-
- struct kobject kobj;
+struct blk_integrity_profile {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+ const char *name;
};
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@ -1500,15 +1501,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
struct bio *);
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
- return bdev->bd_disk->integrity;
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return NULL;
+
+ return bi;
}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
- return disk->integrity;
+ return blk_get_integrity(bdev->bd_disk);
}
static inline bool blk_integrity_rq(struct request *rq)
@@ -1528,6 +1534,26 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1562,10 +1588,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
return 0;
}
-static inline int blk_integrity_register(struct gendisk *d,
+static inline void blk_integrity_register(struct gendisk *d,
struct blk_integrity *b)
{
- return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
@@ -1590,9 +1615,16 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
{
return true;
}
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
+
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
{
- return 0;
+ return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ return false;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1603,8 +1635,8 @@ struct block_device_operations {
int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t,
- void **, unsigned long *pfn, long size);
+ long (*direct_access)(struct block_device *, sector_t, void __pmem **,
+ unsigned long *pfn);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1615,6 +1647,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
struct module *owner;
+ const struct pr_ops *pr_ops;
};
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
@@ -1622,8 +1655,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
- unsigned long *pfn, long size);
+extern long bdev_direct_access(struct block_device *, sector_t,
+ void __pmem **addr, unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/kernel/include/linux/blkpg.h b/kernel/include/linux/blkpg.h
new file mode 100644
index 000000000..bef124fde
--- /dev/null
+++ b/kernel/include/linux/blkpg.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_BLKPG_H
+#define _LINUX_BLKPG_H
+
+/*
+ * Partition table and disk geometry handling
+ */
+
+#include <linux/compat.h>
+#include <uapi/linux/blkpg.h>
+
+#ifdef CONFIG_COMPAT
+/* For 32-bit/64-bit compatibility of struct blkpg_ioctl_arg */
+struct blkpg_compat_ioctl_arg {
+ compat_int_t op;
+ compat_int_t flags;
+ compat_int_t datalen;
+ compat_uptr_t data;
+};
+#endif
+
+#endif /* _LINUX_BLKPG_H */
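A hedged sketch of how an ioctl path might consume this layout; the handler names are illustrative, while compat_ptr() and copy_from_user() are the standard conversions:

	/* Illustrative only: widen the 32-bit argument to native form. */
	static int blkpg_compat_sketch(struct block_device *bdev, unsigned long arg)
	{
		struct blkpg_compat_ioctl_arg a;

		if (copy_from_user(&a, compat_ptr(arg), sizeof(a)))
			return -EFAULT;

		/* a.data is a compat_uptr_t; compat_ptr() turns it back into
		 * a void __user * before the native handler dereferences it.
		 * blkpg_native_sketch() is a hypothetical native-side handler. */
		return blkpg_native_sketch(bdev, a.op, a.flags,
					   compat_ptr(a.data), a.datalen);
	}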
diff --git a/kernel/include/linux/bootmem.h b/kernel/include/linux/bootmem.h
index 0995c2de8..f589222bf 100644
--- a/kernel/include/linux/bootmem.h
+++ b/kernel/include/linux/bootmem.h
@@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename,
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
-#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
-#define HASHDIST_DEFAULT 1
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
-#define HASHDIST_DEFAULT 0
+#define hashdist (0)
#endif
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif /* _LINUX_BOOTMEM_H */
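IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is y or m and to 0 otherwise, so it can sit inside an ordinary expression; the rewrite also turns hashdist into the constant 0 on !NUMA builds, letting the compiler drop the distributed-allocation branch entirely. A sketch of a typical caller, loosely modeled on the alloc_large_system_hash() pattern (the function name is illustrative):

	static void *hash_alloc_sketch(size_t size)
	{
		if (hashdist)			/* constant 0 when !CONFIG_NUMA */
			return vmalloc(size);	/* spread pages across nodes */
		return alloc_pages_exact(size, GFP_KERNEL);
	}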
diff --git a/kernel/include/linux/bottom_half.h b/kernel/include/linux/bottom_half.h
index 8ca938935..d07dbeec7 100644
--- a/kernel/include/linux/bottom_half.h
+++ b/kernel/include/linux/bottom_half.h
@@ -2,16 +2,37 @@
#define _LINUX_BH_H
#include <linux/preempt.h>
-#include <linux/preempt_mask.h>
#ifdef CONFIG_PREEMPT_RT_FULL
-extern void local_bh_disable(void);
+extern void __local_bh_disable(void);
extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
-extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
-extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+extern void __local_bh_enable(void);
+
+static inline void local_bh_disable(void)
+{
+ __local_bh_disable();
+}
+
+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ __local_bh_disable();
+}
+
+static inline void local_bh_enable(void)
+{
+ __local_bh_enable();
+}
+
+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+ __local_bh_enable();
+}
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+ __local_bh_enable();
+}
#else
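On PREEMPT_RT_FULL every variant now funnels into the same __local_bh_disable()/__local_bh_enable() pair, so the ip and cnt hints used by the non-RT accounting are simply ignored. Caller-side view (a trivial sketch):

	static void softirq_data_update_sketch(void)
	{
		local_bh_disable();	/* -> __local_bh_disable() on RT */
		/* ... touch data shared with softirq context ... */
		local_bh_enable();	/* -> __local_bh_enable() on RT */
	}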
diff --git a/kernel/include/linux/bpf.h b/kernel/include/linux/bpf.h
index d5cda0671..83d1926c6 100644
--- a/kernel/include/linux/bpf.h
+++ b/kernel/include/linux/bpf.h
@@ -24,6 +24,10 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+ /* funcs called by prog_array and perf_event_array map */
+ void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+ void (*map_fd_put_ptr) (void *ptr);
};
struct bpf_map {
@@ -32,8 +36,11 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u32 pages;
+ struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
+ atomic_t usercnt;
};
struct bpf_map_type_list {
@@ -96,6 +103,8 @@ enum bpf_access_type {
BPF_WRITE = 2
};
+struct bpf_prog;
+
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -105,8 +114,9 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
- u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
- struct bpf_insn *insn);
+ u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn, struct bpf_prog *prog);
};
struct bpf_prog_type_list {
@@ -115,27 +125,63 @@ struct bpf_prog_type_list {
enum bpf_prog_type type;
};
-struct bpf_prog;
-
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
- struct work_struct work;
+ struct user_struct *user;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
+struct bpf_array {
+ struct bpf_map map;
+ u32 elem_size;
+ /* 'ownership' of prog_array is claimed by the first program that
+ * is going to use this map or by the first program which FD is stored
+ * in the map to make sure that all callers and callees have the same
+ * prog_type and JITed flag
+ */
+ enum bpf_prog_type owner_prog_type;
+ bool owner_jited;
+ union {
+ char value[0] __aligned(8);
+ void *ptrs[0] __aligned(8);
+ };
+};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+
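The ownership comment above translates into a first-user-wins rule: the first program to use a prog_array stamps it with its type and JIT state, and every later user must match. A hedged sketch of that rule (field names follow the struct above; the bpf_prog members are assumed to carry the program's type and jited flag, and locking is omitted):

	static bool prog_array_compat_sketch(struct bpf_array *array,
					     const struct bpf_prog *fp)
	{
		if (array->owner_prog_type == BPF_PROG_TYPE_UNSPEC) {
			/* first user claims ownership of the map */
			array->owner_prog_type = fp->type;
			array->owner_jited = fp->jited;
			return true;
		}
		return array->owner_prog_type == fp->type &&
		       array->owner_jited == fp->jited;
	}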
#ifdef CONFIG_BPF_SYSCALL
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
-struct bpf_map *bpf_map_get(struct fd f);
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
+struct bpf_map *__bpf_map_get(struct fd f);
+void bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
+extern int sysctl_unprivileged_bpf_disabled;
+
+int bpf_map_new_fd(struct bpf_map *map);
+int bpf_prog_new_fd(struct bpf_prog *prog);
+
+int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
+int bpf_obj_get_user(const char __user *pathname);
+
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
#else
@@ -160,5 +206,16 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
+extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
+extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
+
+/* Shared helpers among cBPF and eBPF. */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#endif /* _LINUX_BPF_H */
diff --git a/kernel/include/linux/brcmphy.h b/kernel/include/linux/brcmphy.h
index 656da2a12..59f4a7304 100644
--- a/kernel/include/linux/brcmphy.h
+++ b/kernel/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_BRCMPHY_H
#define _LINUX_BRCMPHY_H
+#include <linux/phy.h>
+
+/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
+ * to configure the switch internal registers via MDIO accesses.
+ */
+#define BRCM_PSEUDO_PHY_ADDR 30
+
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
#define PHY_ID_BCM5241 0x0143bc30
@@ -23,6 +30,8 @@
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM_CYGNUS 0xae025200
+
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
#define PHY_BCM_OUI_2 0x0143bc00
@@ -131,7 +140,10 @@
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
+#define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */
#define BCM54XX_SHD_APD_EN 0x0020
+#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */
+#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */
#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
/* LED3 / ~LINKSPD[2] selector */
@@ -202,27 +214,13 @@
#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
-/*
- * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
- * 0x1c shadow registers.
- */
-static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
-{
- phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
- return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
-}
-
-static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
- u16 val)
-{
- return phy_write(phydev, MII_BCM54XX_SHD,
- MII_BCM54XX_SHD_WRITE |
- MII_BCM54XX_SHD_VAL(shadow) |
- MII_BCM54XX_SHD_DATA(val));
-}
-
#define BRCM_CL45VEN_EEE_CONTROL 0x803d
#define LPI_FEATURE_EN 0x8000
#define LPI_FEATURE_EN_DIG1000X 0x4000
+/* Core register definitions */
+#define MII_BRCM_CORE_BASE1E 0x1E
+#define MII_BRCM_CORE_EXPB0 0xB0
+#define MII_BRCM_CORE_EXPB1 0xB1
+
#endif /* _LINUX_BRCMPHY_H */
diff --git a/kernel/include/linux/buffer_head.h b/kernel/include/linux/buffer_head.h
index 6d25afd8b..4a201008b 100644
--- a/kernel/include/linux/buffer_head.h
+++ b/kernel/include/linux/buffer_head.h
@@ -77,8 +77,7 @@ struct buffer_head {
atomic_t b_count; /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
spinlock_t b_uptodate_lock;
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+#if IS_ENABLED(CONFIG_JBD2)
spinlock_t b_state_lock;
spinlock_t b_journal_head_lock;
#endif
@@ -113,8 +112,7 @@ static inline void buffer_head_init_locks(struct buffer_head *bh)
{
#ifdef CONFIG_PREEMPT_RT_BASE
spin_lock_init(&bh->b_uptodate_lock);
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+#if IS_ENABLED(CONFIG_JBD2)
spin_lock_init(&bh->b_state_lock);
spin_lock_init(&bh->b_journal_head_lock);
#endif
@@ -271,8 +269,6 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
-int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
diff --git a/kernel/include/linux/cacheinfo.h b/kernel/include/linux/cacheinfo.h
index 3daf5ed39..218993507 100644
--- a/kernel/include/linux/cacheinfo.h
+++ b/kernel/include/linux/cacheinfo.h
@@ -19,7 +19,7 @@ enum cache_type {
/**
* struct cacheinfo - represent a cache leaf node
* @type: type of the cache - data, inst or unified
- * @level: represents the hierarcy in the multi-level cache
+ * @level: represents the hierarchy in the multi-level cache
* @coherency_line_size: size of each cache line usually representing
* the minimum amount of data that gets transferred from memory
* @number_of_sets: total number of sets, a set is a collection of cache
diff --git a/kernel/include/linux/can/dev.h b/kernel/include/linux/can/dev.h
index c3a9c8fc6..735f9f8c4 100644
--- a/kernel/include/linux/can/dev.h
+++ b/kernel/include/linux/can/dev.h
@@ -14,9 +14,10 @@
#define _CAN_DEV_H
#include <linux/can.h>
-#include <linux/can/netlink.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
+#include <linux/can/netlink.h>
+#include <linux/netdevice.h>
/*
* CAN mode
@@ -77,7 +78,7 @@ struct can_priv {
#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline int can_dropped_invalid_skb(struct net_device *dev,
+static inline bool can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
{
const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
@@ -93,12 +94,12 @@ static inline int can_dropped_invalid_skb(struct net_device *dev,
} else
goto inval_skb;
- return 0;
+ return false;
inval_skb:
kfree_skb(skb);
dev->stats.tx_dropped++;
- return 1;
+ return true;
}
static inline bool can_is_canfd_skb(const struct sk_buff *skb)
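The switch to bool matches how drivers use the helper: when it returns true the skb has already been freed and tx_dropped bumped, so the transmit handler just reports success. A sketch of the canonical call site (controller internals elided):

	static netdev_tx_t can_xmit_sketch(struct sk_buff *skb,
					   struct net_device *dev)
	{
		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;	/* skb already freed and counted */

		/* ... hand the frame to the controller ... */
		return NETDEV_TX_OK;
	}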
diff --git a/kernel/include/linux/can/led.h b/kernel/include/linux/can/led.h
index 146de4506..2746f7c2f 100644
--- a/kernel/include/linux/can/led.h
+++ b/kernel/include/linux/can/led.h
@@ -11,6 +11,7 @@
#include <linux/if.h>
#include <linux/leds.h>
+#include <linux/netdevice.h>
enum can_led_event {
CAN_LED_EVENT_OPEN,
diff --git a/kernel/include/linux/ceph/ceph_features.h b/kernel/include/linux/ceph/ceph_features.h
index 4763ad64e..f89b31d45 100644
--- a/kernel/include/linux/ceph/ceph_features.h
+++ b/kernel/include/linux/ceph/ceph_features.h
@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
diff --git a/kernel/include/linux/ceph/libceph.h b/kernel/include/linux/ceph/libceph.h
index 30f92cefa..3e3799cdc 100644
--- a/kernel/include/linux/ceph/libceph.h
+++ b/kernel/include/linux/ceph/libceph.h
@@ -29,8 +29,9 @@
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */
+#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -43,9 +44,10 @@ struct ceph_options {
int flags;
struct ceph_fsid fsid;
struct ceph_entity_addr my_addr;
- int mount_timeout;
- int osd_idle_ttl;
- int osd_keepalive_timeout;
+ unsigned long mount_timeout; /* jiffies */
+ unsigned long osd_idle_ttl; /* jiffies */
+ unsigned long osd_keepalive_timeout; /* jiffies */
+ unsigned long monc_ping_timeout; /* jiffies */
/*
 * any type that can't be simply compared or doesn't need
@@ -63,9 +65,10 @@ struct ceph_options {
/*
* defaults
*/
-#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
-#define CEPH_OSD_KEEPALIVE_DEFAULT 5
-#define CEPH_OSD_IDLE_TTL_DEFAULT 60
+#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
+#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
+#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
+#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
@@ -93,13 +96,9 @@ enum {
CEPH_MOUNT_SHUTDOWN,
};
-/*
- * subtract jiffies
- */
-static inline unsigned long time_sub(unsigned long a, unsigned long b)
+static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
- BUG_ON(time_after(b, a));
- return (long)a - (long)b;
+ return timeout ?: MAX_SCHEDULE_TIMEOUT;
}
struct ceph_mds_client;
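The ?: form maps a zero option to MAX_SCHEDULE_TIMEOUT, i.e. "no timeout configured" becomes "wait forever". A usage sketch (the completion field is illustrative):

	long left = wait_for_completion_killable_timeout(&req->r_completion,
			ceph_timeout_jiffies(opts->mount_timeout));
	if (left == 0)
		return -ETIMEDOUT;	/* only reachable for a non-zero option */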
@@ -139,6 +138,7 @@ struct ceph_client {
#endif
};
+#define from_msgr(ms) container_of(ms, struct ceph_client, msgr)
/*
@@ -178,6 +178,7 @@ static inline int calc_pages_for(u64 off, u64 len)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
diff --git a/kernel/include/linux/ceph/messenger.h b/kernel/include/linux/ceph/messenger.h
index e15499422..8dbd7879f 100644
--- a/kernel/include/linux/ceph/messenger.h
+++ b/kernel/include/linux/ceph/messenger.h
@@ -8,6 +8,7 @@
#include <linux/radix-tree.h>
#include <linux/uio.h>
#include <linux/workqueue.h>
+#include <net/net_namespace.h>
#include <linux/ceph/types.h>
#include <linux/ceph/buffer.h>
@@ -42,10 +43,9 @@ struct ceph_connection_operations {
struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip);
- int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
- int (*check_message_signature) (struct ceph_connection *con,
- struct ceph_msg *msg);
+ int (*sign_message) (struct ceph_msg *msg);
+ int (*check_message_signature) (struct ceph_msg *msg);
};
/* use format string %s%d */
@@ -56,8 +56,7 @@ struct ceph_messenger {
struct ceph_entity_addr my_enc_addr;
atomic_t stopping;
- bool nocrc;
- bool tcp_nodelay;
+ possible_net_t net;
/*
 * the global_seq counts connections I (attempt to) initiate
@@ -65,9 +64,6 @@ struct ceph_messenger {
*/
u32 global_seq;
spinlock_t global_seq_lock;
-
- u64 supported_features;
- u64 required_features;
};
enum ceph_msg_data_type {
@@ -224,6 +220,7 @@ struct ceph_connection {
struct ceph_entity_addr actual_peer_addr;
/* message out temps */
+ struct ceph_msg_header out_hdr;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
bool out_msg_done;
@@ -233,9 +230,10 @@ struct ceph_connection {
int out_kvec_left; /* kvec's left in out_kvec */
int out_skip; /* skip this many bytes */
int out_kvec_bytes; /* total bytes left */
- bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
/* message in temps */
struct ceph_msg_header in_hdr;
@@ -246,6 +244,8 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
+ struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
};
@@ -262,11 +262,8 @@ extern void ceph_msgr_exit(void);
extern void ceph_msgr_flush(void);
extern void ceph_messenger_init(struct ceph_messenger *msgr,
- struct ceph_entity_addr *myaddr,
- u64 supported_features,
- u64 required_features,
- bool nocrc,
- bool tcp_nodelay);
+ struct ceph_entity_addr *myaddr);
+extern void ceph_messenger_fini(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
@@ -282,6 +279,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg);
extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
extern void ceph_con_keepalive(struct ceph_connection *con);
+extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
+ unsigned long interval);
extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
size_t length, size_t alignment);
diff --git a/kernel/include/linux/ceph/msgr.h b/kernel/include/linux/ceph/msgr.h
index 1c1887206..0fe2656ac 100644
--- a/kernel/include/linux/ceph/msgr.h
+++ b/kernel/include/linux/ceph/msgr.h
@@ -84,10 +84,12 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_MSG 7 /* message */
#define CEPH_MSGR_TAG_ACK 8 /* message ack */
#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
-#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
+#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
+#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
+#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
/*
diff --git a/kernel/include/linux/ceph/osd_client.h b/kernel/include/linux/ceph/osd_client.h
index 61b19c46b..7506b485b 100644
--- a/kernel/include/linux/ceph/osd_client.h
+++ b/kernel/include/linux/ceph/osd_client.h
@@ -249,7 +249,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode);
+ unsigned int which, u16 opcode, u32 flags);
extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
unsigned int which,
diff --git a/kernel/include/linux/cgroup-defs.h b/kernel/include/linux/cgroup-defs.h
new file mode 100644
index 000000000..63f0063c0
--- /dev/null
+++ b/kernel/include/linux/cgroup-defs.h
@@ -0,0 +1,532 @@
+/*
+ * linux/cgroup-defs.h - basic definitions for cgroup
+ *
+ * This file provides basic types and interfaces. Include this file directly
+ * only if necessary to avoid cyclic dependencies.
+ */
+#ifndef _LINUX_CGROUP_DEFS_H
+#define _LINUX_CGROUP_DEFS_H
+
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/percpu-refcount.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/workqueue.h>
+#include <linux/work-simple.h>
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup;
+struct cgroup_root;
+struct cgroup_subsys;
+struct cgroup_taskset;
+struct kernfs_node;
+struct kernfs_ops;
+struct kernfs_open_file;
+struct seq_file;
+
+#define MAX_CGROUP_TYPE_NAMELEN 32
+#define MAX_CGROUP_ROOT_NAMELEN 64
+#define MAX_CFTYPE_NAME 64
+
+/* define the enumeration of all cgroup subsystems */
+#define SUBSYS(_x) _x ## _cgrp_id,
+#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
+ __unused_tag_ ## _t = CGROUP_ ## _t - 1,
+enum cgroup_subsys_id {
+#include <linux/cgroup_subsys.h>
+ CGROUP_SUBSYS_COUNT,
+};
+#undef SUBSYS_TAG
+#undef SUBSYS
+
+#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
+
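The SUBSYS() X-macro is expanded twice against <linux/cgroup_subsys.h>: once here for the id enum, and again (in cgroup.h below) for the per-subsystem symbol declarations. A standalone demo of the same pattern, with a hypothetical two-entry list:

	#define DEMO_SUBSYS_LIST  SUBSYS(cpu) SUBSYS(memory)

	#define SUBSYS(_x) _x ## _demo_id,
	enum demo_subsys_id { DEMO_SUBSYS_LIST DEMO_SUBSYS_COUNT };
	#undef SUBSYS

	#define SUBSYS(_x) #_x,
	static const char *demo_subsys_name[] = { DEMO_SUBSYS_LIST };
	#undef SUBSYS

One header defines the list once; each expansion site picks the representation it needs (enumerators here, strings there), so the two can never drift apart.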
+/* bits in struct cgroup_subsys_state flags field */
+enum {
+ CSS_NO_REF = (1 << 0), /* no reference counting for this css */
+ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
+ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+};
+
+/* bits in struct cgroup flags field */
+enum {
+ /* Control Group requires release notifications to userspace */
+ CGRP_NOTIFY_ON_RELEASE,
+ /*
+ * Clone the parent's configuration when creating a new child
+ * cpuset cgroup. For historical reasons, this option can be
+ * specified at mount time and thus is implemented here.
+ */
+ CGRP_CPUSET_CLONE_CHILDREN,
+};
+
+/* cgroup_root->flags */
+enum {
+ CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
+ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
+ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
+};
+
+/* cftype->flags */
+enum {
+ CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
+ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
+ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
+ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
+
+ /* internal flags, do not use outside cgroup core proper */
+ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
+ __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+};
+
+/*
+ * cgroup_file is the handle for a file instance created in a cgroup which
+ * is used, for example, to generate file changed notifications. This can
+ * be obtained by setting cftype->file_offset.
+ */
+struct cgroup_file {
+ /* do not access any fields from outside cgroup core */
+ struct kernfs_node *kn;
+};
+
+/*
+ * Per-subsystem/per-cgroup state maintained by the system. This is the
+ * fundamental structural building block that controllers deal with.
+ *
+ * Fields marked with "PI:" are public and immutable and may be accessed
+ * directly without synchronization.
+ */
+struct cgroup_subsys_state {
+ /* PI: the cgroup that this css is attached to */
+ struct cgroup *cgroup;
+
+ /* PI: the cgroup subsystem that this css is attached to */
+ struct cgroup_subsys *ss;
+
+ /* reference count - access via css_[try]get() and css_put() */
+ struct percpu_ref refcnt;
+
+ /* PI: the parent css */
+ struct cgroup_subsys_state *parent;
+
+ /* siblings list anchored at the parent's ->children */
+ struct list_head sibling;
+ struct list_head children;
+
+ /*
+ * PI: Subsys-unique ID. 0 is unused and root is always 1. The
+ * matching css can be looked up using css_from_id().
+ */
+ int id;
+
+ unsigned int flags;
+
+ /*
+ * Monotonically increasing unique serial number which defines a
+ * uniform order among all csses. It's guaranteed that all
+ * ->children lists are in the ascending order of ->serial_nr and
+ * used to allow interrupting and resuming iterations.
+ */
+ u64 serial_nr;
+
+ /*
+ * Incremented by online self and children. Used to guarantee that
+ * parents are not offlined before their children.
+ */
+ atomic_t online_cnt;
+
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
+ struct swork_event destroy_swork;
+};
+
+/*
+ * A css_set is a structure holding pointers to a set of
+ * cgroup_subsys_state objects. This saves space in the task struct
+ * object and speeds up fork()/exit(), since a single inc/dec and a
+ * list_add()/del() can bump the reference count on the entire cgroup
+ * set for a task.
+ */
+struct css_set {
+ /* Reference count */
+ atomic_t refcount;
+
+ /*
+ * List running through all cgroup groups in the same hash
+ * slot. Protected by css_set_lock
+ */
+ struct hlist_node hlist;
+
+ /*
+ * Lists running through all tasks using this cgroup group.
+ * mg_tasks lists tasks which belong to this cset but are in the
+ * process of being migrated out or in. Protected by
+ * css_set_rwsem, but, during migration, once tasks are moved to
+ * mg_tasks, it can be read safely while holding cgroup_mutex.
+ */
+ struct list_head tasks;
+ struct list_head mg_tasks;
+
+ /*
+ * List of cgrp_cset_links pointing at cgroups referenced from this
+ * css_set. Protected by css_set_lock.
+ */
+ struct list_head cgrp_links;
+
+ /* the default cgroup associated with this css_set */
+ struct cgroup *dfl_cgrp;
+
+ /*
+ * Set of subsystem states, one for each subsystem. This array is
+ * immutable after creation apart from the init_css_set during
+ * subsystem registration (at boot time).
+ */
+ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+ /*
+ * List of csets participating in the on-going migration either as
+ * source or destination. Protected by cgroup_mutex.
+ */
+ struct list_head mg_preload_node;
+ struct list_head mg_node;
+
+ /*
+ * If this cset is acting as the source of migration the following
+ * two fields are set. mg_src_cgrp is the source cgroup of the
+ * on-going migration and mg_dst_cset is the destination cset the
+ * target tasks on this cset should be migrated to. Protected by
+ * cgroup_mutex.
+ */
+ struct cgroup *mg_src_cgrp;
+ struct css_set *mg_dst_cset;
+
+ /*
+ * On the default hierarchy, ->subsys[ssid] may point to a css
+ * attached to an ancestor instead of the cgroup this css_set is
+ * associated with. The following node is anchored at
+ * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
+ * iterate through all css's attached to a given cgroup.
+ */
+ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+
+ /* all css_task_iters currently walking this cset */
+ struct list_head task_iters;
+
+ /* For RCU-protected deletion */
+ struct rcu_head rcu_head;
+};
+
+struct cgroup {
+ /* self css with NULL ->ss, points back to this cgroup */
+ struct cgroup_subsys_state self;
+
+ unsigned long flags; /* "unsigned long" so bitops work */
+
+ /*
+ * idr allocated in-hierarchy ID.
+ *
+ * ID 0 is not used, the ID of the root cgroup is always 1, and a
+ * new cgroup will be assigned the smallest available ID.
+ *
+ * Allocating/Removing ID must be protected by cgroup_mutex.
+ */
+ int id;
+
+ /*
+ * Each non-empty css_set associated with this cgroup contributes
+ * one to populated_cnt. All children with non-zero populated_cnt
+ * of their own contribute one. The count is zero iff there's no
+ * task in this cgroup or its subtree.
+ */
+ int populated_cnt;
+
+ struct kernfs_node *kn; /* cgroup kernfs entry */
+ struct cgroup_file procs_file; /* handle for "cgroup.procs" */
+ struct cgroup_file events_file; /* handle for "cgroup.events" */
+
+ /*
+ * The bitmask of subsystems enabled on the child cgroups.
+ * ->subtree_control is the one configured through
+ * "cgroup.subtree_control" while ->child_subsys_mask is the
+ * effective one which may have more subsystems enabled.
+ * Controller knobs are made available iff it's enabled in
+ * ->subtree_control.
+ */
+ unsigned int subtree_control;
+ unsigned int child_subsys_mask;
+
+ /* Private pointers for each registered subsystem */
+ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+
+ struct cgroup_root *root;
+
+ /*
+ * List of cgrp_cset_links pointing at css_sets with tasks in this
+ * cgroup. Protected by css_set_lock.
+ */
+ struct list_head cset_links;
+
+ /*
+ * On the default hierarchy, a css_set for a cgroup with some
+ * subsys disabled will point to css's which are associated with
+ * the closest ancestor which has the subsys enabled. The
+ * following lists all css_sets which point to this cgroup's css
+ * for the given subsystem.
+ */
+ struct list_head e_csets[CGROUP_SUBSYS_COUNT];
+
+ /*
+ * list of pidlists, up to two for each namespace (one for procs, one
+ * for tasks); created on demand.
+ */
+ struct list_head pidlists;
+ struct mutex pidlist_mutex;
+
+ /* used to wait for offlining of csses */
+ wait_queue_head_t offline_waitq;
+
+ /* used to schedule release agent */
+ struct work_struct release_agent_work;
+};
+
+/*
+ * A cgroup_root represents the root of a cgroup hierarchy, and may be
+ * associated with a kernfs_root to form an active hierarchy. This is
+ * internal to cgroup core. Don't access directly from controllers.
+ */
+struct cgroup_root {
+ struct kernfs_root *kf_root;
+
+ /* The bitmask of subsystems attached to this hierarchy */
+ unsigned int subsys_mask;
+
+ /* Unique id for this hierarchy. */
+ int hierarchy_id;
+
+ /* The root cgroup. Root is destroyed on its release. */
+ struct cgroup cgrp;
+
+ /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+ atomic_t nr_cgrps;
+
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+
+ /* Hierarchy-specific flags */
+ unsigned int flags;
+
+ /* IDs for cgroups in this hierarchy */
+ struct idr cgroup_idr;
+
+ /* The path to use for release notifications. */
+ char release_agent_path[PATH_MAX];
+
+ /* The name for this hierarchy - may be empty */
+ char name[MAX_CGROUP_ROOT_NAMELEN];
+};
+
+/*
+ * struct cftype: handler definitions for cgroup control files
+ *
+ * When reading/writing to a file:
+ * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
+ * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
+ */
+struct cftype {
+ /*
+ * By convention, the name should begin with the name of the
+ * subsystem, followed by a period. Zero length string indicates
+ * end of cftype array.
+ */
+ char name[MAX_CFTYPE_NAME];
+ unsigned long private;
+
+ /*
+ * The maximum length of string, excluding trailing nul, that can
+ * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
+ */
+ size_t max_write_len;
+
+ /* CFTYPE_* flags */
+ unsigned int flags;
+
+ /*
+ * If non-zero, should contain the offset from the start of css to
+ * a struct cgroup_file field. cgroup will record the handle of
+ * the created file into it. The recorded handle can be used as
+ * long as the containing css remains accessible.
+ */
+ unsigned int file_offset;
+
+ /*
+ * Fields used for internal bookkeeping. Initialized automatically
+ * during registration.
+ */
+ struct cgroup_subsys *ss; /* NULL for cgroup core files */
+ struct list_head node; /* anchored at ss->cfts */
+ struct kernfs_ops *kf_ops;
+
+ /*
+ * read_u64() is a shortcut for the common case of returning a
+ * single integer. Use it in place of read()
+ */
+ u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
+ /*
+ * read_s64() is a signed version of read_u64()
+ */
+ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
+
+ /* generic seq_file read interface */
+ int (*seq_show)(struct seq_file *sf, void *v);
+
+ /* optional ops, implement all or none */
+ void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
+ void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
+ void (*seq_stop)(struct seq_file *sf, void *v);
+
+ /*
+ * write_u64() is a shortcut for the common case of accepting
+ * a single integer (as parsed by simple_strtoull) from
+ * userspace. Use in place of write(); return 0 or error.
+ */
+ int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val);
+ /*
+ * write_s64() is a signed version of write_u64()
+ */
+ int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val);
+
+ /*
+ * write() is the generic write callback which maps directly to
+ * kernfs write operation and overrides all other operations.
+ * Maximum write size is determined by ->max_write_len. Use
+ * of_css/cft() to access the associated css and cft.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lockdep_key;
+#endif
+};
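A hedged sketch of a minimal cftype array using the read_u64() shortcut; the "demo" names are illustrative, not an existing controller, and registration would go through cgroup_add_legacy_cftypes()/cgroup_add_dfl_cftypes() declared in cgroup.h:

	static u64 demo_usage_read(struct cgroup_subsys_state *css,
				   struct cftype *cft)
	{
		return 42;	/* would report per-cgroup state here */
	}

	static struct cftype demo_files[] = {
		{
			.name = "usage",	/* shows up as demo.usage */
			.read_u64 = demo_usage_read,
		},
		{ }	/* zero-length name terminates the array */
	};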
+
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
+struct cgroup_subsys {
+ struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
+ int (*css_online)(struct cgroup_subsys_state *css);
+ void (*css_offline)(struct cgroup_subsys_state *css);
+ void (*css_released)(struct cgroup_subsys_state *css);
+ void (*css_free)(struct cgroup_subsys_state *css);
+ void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_e_css_changed)(struct cgroup_subsys_state *css);
+
+ int (*can_attach)(struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_taskset *tset);
+ int (*can_fork)(struct task_struct *task, void **priv_p);
+ void (*cancel_fork)(struct task_struct *task, void *priv);
+ void (*fork)(struct task_struct *task, void *priv);
+ void (*exit)(struct task_struct *task);
+ void (*free)(struct task_struct *task);
+ void (*bind)(struct cgroup_subsys_state *root_css);
+
+ int early_init;
+
+ /*
+ * If %false, this subsystem is properly hierarchical -
+ * configuration, resource accounting and restriction on a parent
+ * cgroup cover those of its children. If %true, hierarchy support
+ * is broken in some ways - some subsystems ignore hierarchy
+ * completely while others are only implemented half-way.
+ *
+ * It's now disallowed to create nested cgroups if the subsystem is
+ * broken and cgroup core will emit a warning message in such
+ * cases. Eventually, all subsystems will be made properly
+ * hierarchical and this will go away.
+ */
+ bool broken_hierarchy;
+ bool warned_broken_hierarchy;
+
+ /* the following two fields are initialized automatically during boot */
+ int id;
+ const char *name;
+
+ /* optional, initialized automatically during boot if not set */
+ const char *legacy_name;
+
+ /* link to parent, protected by cgroup_lock() */
+ struct cgroup_root *root;
+
+ /* idr for css->id */
+ struct idr css_idr;
+
+ /*
+ * List of cftypes. Each entry is the first entry of an array
+ * terminated by zero length name.
+ */
+ struct list_head cfts;
+
+ /*
+ * Base cftypes which are automatically registered. The two can
+ * point to the same array.
+ */
+ struct cftype *dfl_cftypes; /* for the default hierarchy */
+ struct cftype *legacy_cftypes; /* for the legacy hierarchies */
+
+ /*
+ * A subsystem may depend on other subsystems. When such subsystem
+ * is enabled on a cgroup, the depended-upon subsystems are enabled
+ * together if available. Subsystems enabled due to dependency are
+ * not visible to userland until explicitly enabled. The following
+ * specifies the mask of subsystems that this one depends on.
+ */
+ unsigned int depends_on;
+};
+
+extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
+/**
+ * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_begin() and allows cgroup operations to
+ * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ */
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+ percpu_down_read(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_end(). Counterpart of
+ * cgroup_threadgroup_change_begin().
+ */
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+ percpu_up_read(&cgroup_threadgroup_rwsem);
+}
+
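Pairing sketch for the two helpers (the fork-side caller is illustrative): the read side brackets any change to the threadgroup, while cgroup migration takes the semaphore for write so it observes a stable group.

	cgroup_threadgroup_change_begin(current);
	/* ... link a new thread into current's threadgroup ... */
	cgroup_threadgroup_change_end(current);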
+#else /* CONFIG_CGROUPS */
+
+#define CGROUP_CANFORK_COUNT 0
+#define CGROUP_SUBSYS_COUNT 0
+
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
+
+#endif /* CONFIG_CGROUPS */
+
+#endif /* _LINUX_CGROUP_DEFS_H */
diff --git a/kernel/include/linux/cgroup.h b/kernel/include/linux/cgroup.h
index 3bd502178..cb91b44f5 100644
--- a/kernel/include/linux/cgroup.h
+++ b/kernel/include/linux/cgroup.h
@@ -11,96 +11,273 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
-#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
-#include <linux/rwsem.h>
-#include <linux/idr.h>
-#include <linux/workqueue.h>
#include <linux/fs.h>
-#include <linux/percpu-refcount.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
-#include <linux/wait.h>
-#include <linux/work-simple.h>
+#include <linux/jump_label.h>
+
+#include <linux/cgroup-defs.h>
#ifdef CONFIG_CGROUPS
-struct cgroup_root;
-struct cgroup_subsys;
-struct cgroup;
+/*
+ * All weight knobs on the default hierarchy should use the following min,
+ * default and max values. The default value is the logarithmic center of
+ * MIN and MAX and allows 100x to be expressed in both directions.
+ */
+#define CGROUP_WEIGHT_MIN 1
+#define CGROUP_WEIGHT_DFL 100
+#define CGROUP_WEIGHT_MAX 10000
-extern int cgroup_init_early(void);
-extern int cgroup_init(void);
-extern void cgroup_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
-extern void cgroup_exit(struct task_struct *p);
-extern int cgroupstats_build(struct cgroupstats *stats,
- struct dentry *dentry);
+/* a css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+ struct cgroup_subsys *ss;
-extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *tsk);
+ struct list_head *cset_pos;
+ struct list_head *cset_head;
-/* define the enumeration of all cgroup subsystems */
-#define SUBSYS(_x) _x ## _cgrp_id,
-enum cgroup_subsys_id {
-#include <linux/cgroup_subsys.h>
- CGROUP_SUBSYS_COUNT,
+ struct list_head *task_pos;
+ struct list_head *tasks_head;
+ struct list_head *mg_tasks_head;
+
+ struct css_set *cur_cset;
+ struct task_struct *cur_task;
+ struct list_head iters_node; /* css_set->task_iters */
};
+
+extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
+
+#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+#define SUBSYS(_x) \
+ extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
+ extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
+#include <linux/cgroup_subsys.h>
#undef SUBSYS
+/**
+ * cgroup_subsys_enabled - fast test on whether a subsys is enabled
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_enabled(ss) \
+ static_branch_likely(&ss ## _enabled_key)
+
+/**
+ * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_on_dfl(ss) \
+ static_branch_likely(&ss ## _on_dfl_key)
+
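Both tests compile down to static jump labels, so a disabled subsystem costs roughly one patched nop on the hot path instead of a load and branch. Usage sketch:

	if (cgroup_subsys_enabled(memory_cgrp_subsys))
		pr_debug("memcg enabled, take the accounting path\n");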
+bool css_has_online_children(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+ struct cgroup_subsys *ss);
+
+bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_file_notify(struct cgroup_file *cfile);
+
+char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
+int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
+
+void cgroup_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_post_fork(struct task_struct *p,
+ void *old_ss_priv[CGROUP_CANFORK_COUNT]);
+void cgroup_exit(struct task_struct *p);
+void cgroup_free(struct task_struct *p);
+
+int cgroup_init_early(void);
+int cgroup_init(void);
+
/*
- * Per-subsystem/per-cgroup state maintained by the system. This is the
- * fundamental structural building block that controllers deal with.
+ * Iteration helpers and macros.
+ */
+
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *parent);
+struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
+struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
+
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp);
+
+void css_task_iter_start(struct cgroup_subsys_state *css,
+ struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
+
+/**
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
+ *
+ * Walk @parent's children. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
*
- * Fields marked with "PI:" are public and immutable and may be accessed
- * directly without synchronization.
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
*/
-struct cgroup_subsys_state {
- /* PI: the cgroup that this css is attached to */
- struct cgroup *cgroup;
-
- /* PI: the cgroup subsystem that this css is attached to */
- struct cgroup_subsys *ss;
-
- /* reference count - access via css_[try]get() and css_put() */
- struct percpu_ref refcnt;
-
- /* PI: the parent css */
- struct cgroup_subsys_state *parent;
-
- /* siblings list anchored at the parent's ->children */
- struct list_head sibling;
- struct list_head children;
-
- /*
- * PI: Subsys-unique ID. 0 is unused and root is always 1. The
- * matching css can be looked up using css_from_id().
- */
- int id;
-
- unsigned int flags;
-
- /*
- * Monotonically increasing unique serial number which defines a
- * uniform order among all csses. It's guaranteed that all
- * ->children lists are in the ascending order of ->serial_nr and
- * used to allow interrupting and resuming iterations.
- */
- u64 serial_nr;
-
- /* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
- struct work_struct destroy_work;
- struct swork_event destroy_swork;
-};
+#define css_for_each_child(pos, parent) \
+ for ((pos) = css_next_child(NULL, (parent)); (pos); \
+ (pos) = css_next_child((pos), (parent)))
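Usage sketch, honoring the rcu_read_lock() requirement spelled out above (parent_css stands for some css the caller already holds):

	struct cgroup_subsys_state *child;

	rcu_read_lock();
	css_for_each_child(child, parent_css)
		pr_debug("child css id %d\n", child->id);
	rcu_read_unlock();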
-/* bits in struct cgroup_subsys_state flags field */
-enum {
- CSS_NO_REF = (1 << 0), /* no reference counting for this css */
- CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
- CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
-};
+/**
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @root: css whose descendants to walk
+ *
+ * Walk @root's descendants. @root is included in the iteration and the
+ * first node to be visited. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * For example, the following guarantees that a descendant can't escape
+ * state updates of its ancestors.
+ *
+ * my_online(@css)
+ * {
+ * Lock @css's parent and @css;
+ * Inherit state from the parent;
+ * Unlock both.
+ * }
+ *
+ * my_update_state(@css)
+ * {
+ * css_for_each_descendant_pre(@pos, @css) {
+ * Lock @pos;
+ * if (@pos == @css)
+ * Update @css's state;
+ * else
+ * Verify @pos is alive and inherit state from its parent;
+ * Unlock @pos;
+ * }
+ * }
+ *
+ * As long as the inheriting step, including checking the parent state, is
+ * enclosed inside @pos locking, double-locking the parent isn't necessary
+ * while inheriting. The state update to the parent is guaranteed to be
+ * visible by walking order and, as long as inheriting operations to the
+ * same @pos are atomic to each other, multiple updates racing each other
+ * still result in the correct state. It's guaranteed that at least one
+ * inheritance happens for any css after the latest update to its parent.
+ *
+ * If checking parent's state requires locking the parent, each inheriting
+ * iteration should lock and unlock both @pos->parent and @pos.
+ *
+ * Alternatively, a subsystem may choose to use a single global lock to
+ * synchronize ->css_online() and ->css_offline() against tree-walking
+ * operations.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
+ */
+#define css_for_each_descendant_pre(pos, css) \
+ for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_pre((pos), (css)))
+
+/**
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
+ *
+ * Similar to css_for_each_descendant_pre() but performs post-order
+ * traversal instead. @root is included in the iteration and the last
+ * node to be visited.
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * Note that the walk visibility guarantee example described in pre-order
+ * walk doesn't apply the same to post-order walks.
+ */
+#define css_for_each_descendant_post(pos, css) \
+ for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_post((pos), (css)))
+
+/**
+ * cgroup_taskset_for_each - iterate cgroup_taskset
+ * @task: the loop cursor
+ * @dst_css: the destination css
+ * @tset: taskset to iterate
+ *
+ * @tset may contain multiple tasks and they may belong to multiple
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
+ *
+ * Iteration is not in any specific order.
+ */
+#define cgroup_taskset_for_each(task, dst_css, tset) \
+ for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
+ (task); \
+ (task) = cgroup_taskset_next((tset), &(dst_css)))
+
+/**
+ * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
+ * @leader: the loop cursor
+ * @dst_css: the destination css
+ * @tset: taskset to iterate
+ *
+ * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
+ * may not contain any leader.
+ */
+#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
+ for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
+ (leader); \
+ (leader) = cgroup_taskset_next((tset), &(dst_css))) \
+ if ((leader) != (leader)->group_leader) \
+ ; \
+ else
+
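For the leader variant, a sketch that counts migrating processes rather than threads (the counting itself is illustrative):

static int my_count_procs(struct cgroup_taskset *tset)
{
        struct cgroup_subsys_state *dst_css;
        struct task_struct *leader;
        int nr_procs = 0;

        cgroup_taskset_for_each_leader(leader, dst_css, tset)
                nr_procs++;             /* one increment per thread group */
        return nr_procs;
}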
+/*
+ * Inline functions.
+ */
/**
* css_get - obtain a reference on the specified css
@@ -187,532 +364,6 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
percpu_ref_put_many(&css->refcnt, n);
}
-/* bits in struct cgroup flags field */
-enum {
- /* Control Group requires release notifications to userspace */
- CGRP_NOTIFY_ON_RELEASE,
- /*
- * Clone the parent's configuration when creating a new child
- * cpuset cgroup. For historical reasons, this option can be
- * specified at mount time and thus is implemented here.
- */
- CGRP_CPUSET_CLONE_CHILDREN,
-};
-
-struct cgroup {
- /* self css with NULL ->ss, points back to this cgroup */
- struct cgroup_subsys_state self;
-
- unsigned long flags; /* "unsigned long" so bitops work */
-
- /*
- * idr allocated in-hierarchy ID.
- *
- * ID 0 is not used, the ID of the root cgroup is always 1, and a
- * new cgroup will be assigned with a smallest available ID.
- *
- * Allocating/Removing ID must be protected by cgroup_mutex.
- */
- int id;
-
- /*
- * If this cgroup contains any tasks, it contributes one to
- * populated_cnt. All children with non-zero popuplated_cnt of
- * their own contribute one. The count is zero iff there's no task
- * in this cgroup or its subtree.
- */
- int populated_cnt;
-
- struct kernfs_node *kn; /* cgroup kernfs entry */
- struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
-
- /*
- * The bitmask of subsystems enabled on the child cgroups.
- * ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_subsys_mask is the
- * effective one which may have more subsystems enabled.
- * Controller knobs are made available iff it's enabled in
- * ->subtree_control.
- */
- unsigned int subtree_control;
- unsigned int child_subsys_mask;
-
- /* Private pointers for each registered subsystem */
- struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
-
- struct cgroup_root *root;
-
- /*
- * List of cgrp_cset_links pointing at css_sets with tasks in this
- * cgroup. Protected by css_set_lock.
- */
- struct list_head cset_links;
-
- /*
- * On the default hierarchy, a css_set for a cgroup with some
- * susbsys disabled will point to css's which are associated with
- * the closest ancestor which has the subsys enabled. The
- * following lists all css_sets which point to this cgroup's css
- * for the given subsystem.
- */
- struct list_head e_csets[CGROUP_SUBSYS_COUNT];
-
- /*
- * list of pidlists, up to two for each namespace (one for procs, one
- * for tasks); created on demand.
- */
- struct list_head pidlists;
- struct mutex pidlist_mutex;
-
- /* used to wait for offlining of csses */
- wait_queue_head_t offline_waitq;
-
- /* used to schedule release agent */
- struct work_struct release_agent_work;
-};
-
-#define MAX_CGROUP_ROOT_NAMELEN 64
-
-/* cgroup_root->flags */
-enum {
- CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
- CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
- CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
-};
-
-/*
- * A cgroup_root represents the root of a cgroup hierarchy, and may be
- * associated with a kernfs_root to form an active hierarchy. This is
- * internal to cgroup core. Don't access directly from controllers.
- */
-struct cgroup_root {
- struct kernfs_root *kf_root;
-
- /* The bitmask of subsystems attached to this hierarchy */
- unsigned int subsys_mask;
-
- /* Unique id for this hierarchy. */
- int hierarchy_id;
-
- /* The root cgroup. Root is destroyed on its release. */
- struct cgroup cgrp;
-
- /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
- atomic_t nr_cgrps;
-
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
- /* Hierarchy-specific flags */
- unsigned int flags;
-
- /* IDs for cgroups in this hierarchy */
- struct idr cgroup_idr;
-
- /* The path to use for release notifications. */
- char release_agent_path[PATH_MAX];
-
- /* The name for this hierarchy - may be empty */
- char name[MAX_CGROUP_ROOT_NAMELEN];
-};
-
-/*
- * A css_set is a structure holding pointers to a set of
- * cgroup_subsys_state objects. This saves space in the task struct
- * object and speeds up fork()/exit(), since a single inc/dec and a
- * list_add()/del() can bump the reference count on the entire cgroup
- * set for a task.
- */
-
-struct css_set {
-
- /* Reference count */
- atomic_t refcount;
-
- /*
- * List running through all cgroup groups in the same hash
- * slot. Protected by css_set_lock
- */
- struct hlist_node hlist;
-
- /*
- * Lists running through all tasks using this cgroup group.
- * mg_tasks lists tasks which belong to this cset but are in the
- * process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
- * mg_tasks, it can be read safely while holding cgroup_mutex.
- */
- struct list_head tasks;
- struct list_head mg_tasks;
-
- /*
- * List of cgrp_cset_links pointing at cgroups referenced from this
- * css_set. Protected by css_set_lock.
- */
- struct list_head cgrp_links;
-
- /* the default cgroup associated with this css_set */
- struct cgroup *dfl_cgrp;
-
- /*
- * Set of subsystem states, one for each subsystem. This array is
- * immutable after creation apart from the init_css_set during
- * subsystem registration (at boot time).
- */
- struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
-
- /*
- * List of csets participating in the on-going migration either as
- * source or destination. Protected by cgroup_mutex.
- */
- struct list_head mg_preload_node;
- struct list_head mg_node;
-
- /*
- * If this cset is acting as the source of migration the following
- * two fields are set. mg_src_cgrp is the source cgroup of the
- * on-going migration and mg_dst_cset is the destination cset the
- * target tasks on this cset should be migrated to. Protected by
- * cgroup_mutex.
- */
- struct cgroup *mg_src_cgrp;
- struct css_set *mg_dst_cset;
-
- /*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
- * attached to an ancestor instead of the cgroup this css_set is
- * associated with. The following node is anchored at
- * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
- * iterate through all css's attached to a given cgroup.
- */
- struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
-
- /* For RCU-protected deletion */
- struct rcu_head rcu_head;
-};
-
-/*
- * struct cftype: handler definitions for cgroup control files
- *
- * When reading/writing to a file:
- * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
- * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
- */
-
-/* cftype->flags */
-enum {
- CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
- CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
- CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
-
- /* internal flags, do not use outside cgroup core proper */
- __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
- __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
-};
-
-#define MAX_CFTYPE_NAME 64
-
-struct cftype {
- /*
- * By convention, the name should begin with the name of the
- * subsystem, followed by a period. Zero length string indicates
- * end of cftype array.
- */
- char name[MAX_CFTYPE_NAME];
- int private;
- /*
- * If not 0, file mode is set to this value, otherwise it will
- * be figured out automatically
- */
- umode_t mode;
-
- /*
- * The maximum length of string, excluding trailing nul, that can
- * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
- */
- size_t max_write_len;
-
- /* CFTYPE_* flags */
- unsigned int flags;
-
- /*
- * Fields used for internal bookkeeping. Initialized automatically
- * during registration.
- */
- struct cgroup_subsys *ss; /* NULL for cgroup core files */
- struct list_head node; /* anchored at ss->cfts */
- struct kernfs_ops *kf_ops;
-
- /*
- * read_u64() is a shortcut for the common case of returning a
- * single integer. Use it in place of read()
- */
- u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
- /*
- * read_s64() is a signed version of read_u64()
- */
- s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
-
- /* generic seq_file read interface */
- int (*seq_show)(struct seq_file *sf, void *v);
-
- /* optional ops, implement all or none */
- void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
- void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
- void (*seq_stop)(struct seq_file *sf, void *v);
-
- /*
- * write_u64() is a shortcut for the common case of accepting
- * a single integer (as parsed by simple_strtoull) from
- * userspace. Use in place of write(); return 0 or error.
- */
- int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 val);
- /*
- * write_s64() is a signed version of write_u64()
- */
- int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
- s64 val);
-
- /*
- * write() is the generic write callback which maps directly to
- * kernfs write operation and overrides all other operations.
- * Maximum write size is determined by ->max_write_len. Use
- * of_css/cft() to access the associated css and cft.
- */
- ssize_t (*write)(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
-};
-
-extern struct cgroup_root cgrp_dfl_root;
-extern struct css_set init_css_set;
-
-/**
- * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
- * @cgrp: the cgroup of interest
- *
- * The default hierarchy is the v2 interface of cgroup and this function
- * can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
- * interface version.
- *
- * The set of behaviors which change on the default hierarchy are still
- * being determined and the mount option is prefixed with __DEVEL__.
- *
- * List of changed behaviors:
- *
- * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
- * and "name" are disallowed.
- *
- * - When mounting an existing superblock, mount options should match.
- *
- * - Remount is disallowed.
- *
- * - rename(2) is disallowed.
- *
- * - "tasks" is removed. Everything should be at process granularity. Use
- * "cgroup.procs" instead.
- *
- * - "cgroup.procs" is not sorted. pids will be unique unless they got
- * recycled inbetween reads.
- *
- * - "release_agent" and "notify_on_release" are removed. Replacement
- * notification mechanism will be implemented.
- *
- * - "cgroup.clone_children" is removed.
- *
- * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
- * and its descendants contain no task; otherwise, 1. The file also
- * generates kernfs notification which can be monitored through poll and
- * [di]notify when the value of the file changes.
- *
- * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
- * take masks of ancestors with non-empty cpus/mems, instead of being
- * moved to an ancestor.
- *
- * - cpuset: a task can be moved into an empty cpuset, and again it takes
- * masks of ancestors.
- *
- * - memcg: use_hierarchy is on by default and the cgroup file for the flag
- * is not created.
- *
- * - blkcg: blk-throttle becomes properly hierarchical.
- *
- * - debug: disallowed on the default hierarchy.
- */
-static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
-{
- return cgrp->root == &cgrp_dfl_root;
-}
-
-/* no synchronization, the result can only be used as a hint */
-static inline bool cgroup_has_tasks(struct cgroup *cgrp)
-{
- return !list_empty(&cgrp->cset_links);
-}
-
-/* returns ino associated with a cgroup */
-static inline ino_t cgroup_ino(struct cgroup *cgrp)
-{
- return cgrp->kn->ino;
-}
-
-/* cft/css accessors for cftype->write() operation */
-static inline struct cftype *of_cft(struct kernfs_open_file *of)
-{
- return of->kn->priv;
-}
-
-struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
-
-/* cft/css accessors for cftype->seq_*() operations */
-static inline struct cftype *seq_cft(struct seq_file *seq)
-{
- return of_cft(seq->private);
-}
-
-static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
-{
- return of_css(seq->private);
-}
-
-/*
- * Name / path handling functions. All are thin wrappers around the kernfs
- * counterparts and can be called under any context.
- */
-
-static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
-{
- return kernfs_name(cgrp->kn, buf, buflen);
-}
-
-static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
- size_t buflen)
-{
- return kernfs_path(cgrp->kn, buf, buflen);
-}
-
-static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
-{
- pr_cont_kernfs_name(cgrp->kn);
-}
-
-static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
-{
- pr_cont_kernfs_path(cgrp->kn);
-}
-
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
-
-int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
-int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
-int cgroup_rm_cftypes(struct cftype *cfts);
-
-bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
-
-/*
- * Control Group taskset, used to pass around set of tasks to cgroup_subsys
- * methods.
- */
-struct cgroup_taskset;
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-
-/**
- * cgroup_taskset_for_each - iterate cgroup_taskset
- * @task: the loop cursor
- * @tset: taskset to iterate
- */
-#define cgroup_taskset_for_each(task, tset) \
- for ((task) = cgroup_taskset_first((tset)); (task); \
- (task) = cgroup_taskset_next((tset)))
-
-/*
- * Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
- */
-
-struct cgroup_subsys {
- struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
- int (*css_online)(struct cgroup_subsys_state *css);
- void (*css_offline)(struct cgroup_subsys_state *css);
- void (*css_released)(struct cgroup_subsys_state *css);
- void (*css_free)(struct cgroup_subsys_state *css);
- void (*css_reset)(struct cgroup_subsys_state *css);
- void (*css_e_css_changed)(struct cgroup_subsys_state *css);
-
- int (*can_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*cancel_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*fork)(struct task_struct *task);
- void (*exit)(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task);
- void (*bind)(struct cgroup_subsys_state *root_css);
-
- int disabled;
- int early_init;
-
- /*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
- *
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
- */
- bool broken_hierarchy;
- bool warned_broken_hierarchy;
-
- /* the following two fields are initialized automtically during boot */
- int id;
-#define MAX_CGROUP_TYPE_NAMELEN 32
- const char *name;
-
- /* link to parent, protected by cgroup_lock() */
- struct cgroup_root *root;
-
- /* idr for css->id */
- struct idr css_idr;
-
- /*
- * List of cftypes. Each entry is the first entry of an array
- * terminated by zero length name.
- */
- struct list_head cfts;
-
- /*
- * Base cftypes which are automatically registered. The two can
- * point to the same array.
- */
- struct cftype *dfl_cftypes; /* for the default hierarchy */
- struct cftype *legacy_cftypes; /* for the legacy hierarchies */
-
- /*
- * A subsystem may depend on other subsystems. When such subsystem
- * is enabled on a cgroup, the depended-upon subsystems are enabled
- * together if available. Subsystems enabled due to dependency are
- * not visible to userland until explicitly enabled. The following
- * specifies the mask of subsystems that this one depends on.
- */
- unsigned int depends_on;
-};
-
-#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
-#include <linux/cgroup_subsys.h>
-#undef SUBSYS
-
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -728,11 +379,11 @@ struct cgroup_subsys {
*/
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
-extern struct rw_semaphore css_set_rwsem;
+extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
lockdep_is_held(&cgroup_mutex) || \
- lockdep_is_held(&css_set_rwsem) || \
+ lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
@@ -776,6 +427,31 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
}
/**
+ * task_get_css - find and get the css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Find the css for the (@task, @subsys_id) combination, increment a
+ * reference on and return it. This function is guaranteed to return a
+ * valid css.
+ */
+static inline struct cgroup_subsys_state *
+task_get_css(struct task_struct *task, int subsys_id)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+ while (true) {
+ css = task_css(task, subsys_id);
+ if (likely(css_tryget_online(css)))
+ break;
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return css;
+}
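A usage sketch: pin the css across an operation and drop the reference afterwards (foo_cgrp_id and my_do_charge() are hypothetical):

static void my_charge(struct task_struct *task)
{
        struct cgroup_subsys_state *css;

        css = task_get_css(task, foo_cgrp_id);  /* always returns a valid css */
        my_do_charge(css);
        css_put(css);
}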
+
+/**
* task_css_is_root - test whether a task belongs to the root css
* @task: the target task
* @subsys_id: the target subsystem ID
@@ -795,178 +471,86 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
-struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
- struct cgroup_subsys_state *parent);
-
-struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+/* no synchronization, the result can only be used as a hint */
+static inline bool cgroup_is_populated(struct cgroup *cgrp)
+{
+ return cgrp->populated_cnt;
+}
-/**
- * css_for_each_child - iterate through children of a css
- * @pos: the css * to use as the loop cursor
- * @parent: css whose children to walk
- *
- * Walk @parent's children. Must be called under rcu_read_lock().
- *
- * If a subsystem synchronizes ->css_online() and the start of iteration, a
- * css which finished ->css_online() is guaranteed to be visible in the
- * future iterations and will stay visible until the last reference is put.
- * A css which hasn't finished ->css_online() or already finished
- * ->css_offline() may show up during traversal. It's each subsystem's
- * responsibility to synchronize against on/offlining.
- *
- * It is allowed to temporarily drop RCU read lock during iteration. The
- * caller is responsible for ensuring that @pos remains accessible until
- * the start of the next iteration by, for example, bumping the css refcnt.
- */
-#define css_for_each_child(pos, parent) \
- for ((pos) = css_next_child(NULL, (parent)); (pos); \
- (pos) = css_next_child((pos), (parent)))
+/* returns ino associated with a cgroup */
+static inline ino_t cgroup_ino(struct cgroup *cgrp)
+{
+ return cgrp->kn->ino;
+}
-struct cgroup_subsys_state *
-css_next_descendant_pre(struct cgroup_subsys_state *pos,
- struct cgroup_subsys_state *css);
+/* cft/css accessors for cftype->write() operation */
+static inline struct cftype *of_cft(struct kernfs_open_file *of)
+{
+ return of->kn->priv;
+}
-struct cgroup_subsys_state *
-css_rightmost_descendant(struct cgroup_subsys_state *pos);
+struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
-/**
- * css_for_each_descendant_pre - pre-order walk of a css's descendants
- * @pos: the css * to use as the loop cursor
- * @root: css whose descendants to walk
- *
- * Walk @root's descendants. @root is included in the iteration and the
- * first node to be visited. Must be called under rcu_read_lock().
- *
- * If a subsystem synchronizes ->css_online() and the start of iteration, a
- * css which finished ->css_online() is guaranteed to be visible in the
- * future iterations and will stay visible until the last reference is put.
- * A css which hasn't finished ->css_online() or already finished
- * ->css_offline() may show up during traversal. It's each subsystem's
- * responsibility to synchronize against on/offlining.
- *
- * For example, the following guarantees that a descendant can't escape
- * state updates of its ancestors.
- *
- * my_online(@css)
- * {
- * Lock @css's parent and @css;
- * Inherit state from the parent;
- * Unlock both.
- * }
- *
- * my_update_state(@css)
- * {
- * css_for_each_descendant_pre(@pos, @css) {
- * Lock @pos;
- * if (@pos == @css)
- * Update @css's state;
- * else
- * Verify @pos is alive and inherit state from its parent;
- * Unlock @pos;
- * }
- * }
- *
- * As long as the inheriting step, including checking the parent state, is
- * enclosed inside @pos locking, double-locking the parent isn't necessary
- * while inheriting. The state update to the parent is guaranteed to be
- * visible by walking order and, as long as inheriting operations to the
- * same @pos are atomic to each other, multiple updates racing each other
- * still result in the correct state. It's guaranateed that at least one
- * inheritance happens for any css after the latest update to its parent.
- *
- * If checking parent's state requires locking the parent, each inheriting
- * iteration should lock and unlock both @pos->parent and @pos.
- *
- * Alternatively, a subsystem may choose to use a single global lock to
- * synchronize ->css_online() and ->css_offline() against tree-walking
- * operations.
- *
- * It is allowed to temporarily drop RCU read lock during iteration. The
- * caller is responsible for ensuring that @pos remains accessible until
- * the start of the next iteration by, for example, bumping the css refcnt.
- */
-#define css_for_each_descendant_pre(pos, css) \
- for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
- (pos) = css_next_descendant_pre((pos), (css)))
+/* cft/css accessors for cftype->seq_*() operations */
+static inline struct cftype *seq_cft(struct seq_file *seq)
+{
+ return of_cft(seq->private);
+}
-struct cgroup_subsys_state *
-css_next_descendant_post(struct cgroup_subsys_state *pos,
- struct cgroup_subsys_state *css);
+static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+{
+ return of_css(seq->private);
+}
-/**
- * css_for_each_descendant_post - post-order walk of a css's descendants
- * @pos: the css * to use as the loop cursor
- * @css: css whose descendants to walk
- *
- * Similar to css_for_each_descendant_pre() but performs post-order
- * traversal instead. @root is included in the iteration and the last
- * node to be visited.
- *
- * If a subsystem synchronizes ->css_online() and the start of iteration, a
- * css which finished ->css_online() is guaranteed to be visible in the
- * future iterations and will stay visible until the last reference is put.
- * A css which hasn't finished ->css_online() or already finished
- * ->css_offline() may show up during traversal. It's each subsystem's
- * responsibility to synchronize against on/offlining.
- *
- * Note that the walk visibility guarantee example described in pre-order
- * walk doesn't apply the same to post-order walks.
+/*
+ * Name / path handling functions. All are thin wrappers around the kernfs
+ * counterparts and can be called under any context.
*/
-#define css_for_each_descendant_post(pos, css) \
- for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
- (pos) = css_next_descendant_post((pos), (css)))
-
-bool css_has_online_children(struct cgroup_subsys_state *css);
-
-/* A css_task_iter should be treated as an opaque object */
-struct css_task_iter {
- struct cgroup_subsys *ss;
- struct list_head *cset_pos;
- struct list_head *cset_head;
-
- struct list_head *task_pos;
- struct list_head *tasks_head;
- struct list_head *mg_tasks_head;
-};
+static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
+{
+ return kernfs_name(cgrp->kn, buf, buflen);
+}
-void css_task_iter_start(struct cgroup_subsys_state *css,
- struct css_task_iter *it);
-struct task_struct *css_task_iter_next(struct css_task_iter *it);
-void css_task_iter_end(struct css_task_iter *it);
+static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
+ size_t buflen)
+{
+ return kernfs_path(cgrp->kn, buf, buflen);
+}
-int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
-int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
+{
+ pr_cont_kernfs_name(cgrp->kn);
+}
-struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
- struct cgroup_subsys *ss);
-struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
- struct cgroup_subsys *ss);
+static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
+{
+ pr_cont_kernfs_path(cgrp->kn);
+}
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
-static inline int cgroup_init_early(void) { return 0; }
-static inline int cgroup_init(void) { return 0; }
-static inline void cgroup_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
-static inline void cgroup_exit(struct task_struct *p) {}
-
+static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
- struct dentry *dentry)
-{
- return -EINVAL;
-}
+ struct dentry *dentry) { return -EINVAL; }
-static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT])
+{ return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]) {}
+static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_free(struct task_struct *p) {}
-/* No cgroups - nothing to do */
-static inline int cgroup_attach_task_all(struct task_struct *from,
- struct task_struct *t)
-{
- return 0;
-}
+static inline int cgroup_init_early(void) { return 0; }
+static inline int cgroup_init(void) { return 0; }
#endif /* !CONFIG_CGROUPS */
diff --git a/kernel/include/linux/cgroup_subsys.h b/kernel/include/linux/cgroup_subsys.h
index e4a96fb14..1a96fdaa3 100644
--- a/kernel/include/linux/cgroup_subsys.h
+++ b/kernel/include/linux/cgroup_subsys.h
@@ -3,6 +3,17 @@
*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
+
+/*
+ * This file *must* be included with SUBSYS() defined.
+ * SUBSYS_TAG() is a noop if undefined.
+ */
+
+#ifndef SUBSYS_TAG
+#define __TMP_SUBSYS_TAG
+#define SUBSYS_TAG(_x)
+#endif
+
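One plausible way a consumer expands this file, shown as a sketch (the in-tree definitions may differ in detail): SUBSYS() becomes an enum entry and SUBSYS_TAG() a marker value, which lets the span of can_fork-capable subsystems be computed.

#define SUBSYS(_x) _x ## _cgrp_id,
#define SUBSYS_TAG(_t) CGROUP_ ## _t,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
        CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS_TAG
#undef SUBSYS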
#if IS_ENABLED(CONFIG_CPUSETS)
SUBSYS(cpuset)
#endif
@@ -16,7 +27,7 @@ SUBSYS(cpuacct)
#endif
#if IS_ENABLED(CONFIG_BLK_CGROUP)
-SUBSYS(blkio)
+SUBSYS(io)
#endif
#if IS_ENABLED(CONFIG_MEMCG)
@@ -48,11 +59,28 @@ SUBSYS(hugetlb)
#endif
/*
+ * Subsystems that implement the can_fork() family of callbacks.
+ */
+SUBSYS_TAG(CANFORK_START)
+
+#if IS_ENABLED(CONFIG_CGROUP_PIDS)
+SUBSYS(pids)
+#endif
+
+SUBSYS_TAG(CANFORK_END)
+
+/*
* The following subsystems are not supported on the default hierarchy.
*/
#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
SUBSYS(debug)
#endif
+
+#ifdef __TMP_SUBSYS_TAG
+#undef __TMP_SUBSYS_TAG
+#undef SUBSYS_TAG
+#endif
+
/*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
diff --git a/kernel/include/linux/clk-provider.h b/kernel/include/linux/clk-provider.h
index df695313f..c56988ac6 100644
--- a/kernel/include/linux/clk-provider.h
+++ b/kernel/include/linux/clk-provider.h
@@ -11,7 +11,6 @@
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -31,12 +30,36 @@
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
+#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
+struct clk;
struct clk_hw;
struct clk_core;
struct dentry;
/**
+ * struct clk_rate_request - Structure encoding the clk constraints that
+ * a clock user might require.
+ *
+ * @rate: Requested clock rate. This field will be adjusted by
+ * clock drivers according to hardware capabilities.
+ * @min_rate: Minimum rate imposed by clk users.
+ * @max_rate: Maximum rate imposed by clk users.
+ * @best_parent_rate: The best parent rate a parent can provide to fulfill the
+ * requested constraints.
+ * @best_parent_hw: The most appropriate parent clock that fulfills the
+ * requested constraints.
+ *
+ */
+struct clk_rate_request {
+ unsigned long rate;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned long best_parent_rate;
+ struct clk_hw *best_parent_hw;
+};
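A sketch of a .determine_rate() callback under the new prototype; the power-of-two divider model is illustrative, but the clk_hw helpers are the ones introduced later in this patch:

static int my_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        struct clk_hw *parent = clk_hw_get_parent(hw);
        unsigned long best = clk_hw_get_rate(parent);

        while (best > req->rate && best / 2 >= req->min_rate)
                best /= 2;                      /* halve toward the target */

        if (best < req->min_rate || best > req->max_rate)
                return -EINVAL;

        req->rate = best;
        req->best_parent_rate = clk_hw_get_rate(parent);
        req->best_parent_hw = parent;
        return 0;
}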
+
+/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
@@ -175,12 +198,8 @@ struct clk_ops {
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
- long (*determine_rate)(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw);
+ int (*determine_rate)(struct clk_hw *hw,
+ struct clk_rate_request *req);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -209,7 +228,7 @@ struct clk_ops {
struct clk_init_data {
const char *name;
const struct clk_ops *ops;
- const char **parent_names;
+ const char * const *parent_names;
u8 num_parents;
unsigned long flags;
};
@@ -342,6 +361,9 @@ struct clk_div_table {
* to the closest integer instead of the up one.
* CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
* not be changed by the clock framework.
+ * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
+ * except when the value read from the register is zero, the divisor is
+ * 2^width of the field.
*/
struct clk_divider {
struct clk_hw hw;
@@ -359,6 +381,7 @@ struct clk_divider {
#define CLK_DIVIDER_HIWORD_MASK BIT(3)
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
+#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
@@ -426,12 +449,14 @@ extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
struct clk *clk_register_mux(struct device *dev, const char *name,
- const char **parent_names, u8 num_parents, unsigned long flags,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
- const char **parent_names, u8 num_parents, unsigned long flags,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
@@ -457,7 +482,7 @@ struct clk_fixed_factor {
unsigned int div;
};
-extern struct clk_ops clk_fixed_factor_ops;
+extern const struct clk_ops clk_fixed_factor_ops;
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
@@ -475,13 +500,14 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
*
* Clock with adjustable fractional divider affecting its output frequency.
*/
-
struct clk_fractional_divider {
struct clk_hw hw;
void __iomem *reg;
u8 mshift;
+ u8 mwidth;
u32 mmask;
u8 nshift;
+ u8 nwidth;
u32 nmask;
u8 flags;
spinlock_t *lock;
@@ -493,6 +519,41 @@ struct clk *clk_register_fractional_divider(struct device *dev,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
u8 clk_divider_flags, spinlock_t *lock);
+/**
+ * struct clk_multiplier - adjustable multiplier clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the multiplier
+ * @shift: shift to the multiplier bit field
+ * @width: width of the multiplier bit field
+ * @lock: register lock
+ *
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+ * Flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ * from the register, with 0 being a valid value effectively
+ * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+ * set, then a null multiplier will be considered as a bypass,
+ * leaving the parent rate unmodified.
+ * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated multiplier be
+ *		rounded to the closest integer instead of rounded down.
+ */
+struct clk_multiplier {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
+
+extern const struct clk_ops clk_multiplier_ops;
+
/***
* struct clk_composite - aggregate clock of mux, divider and gate clocks
*
@@ -518,7 +579,7 @@ struct clk_composite {
};
struct clk *clk_register_composite(struct device *dev, const char *name,
- const char **parent_names, int num_parents,
+ const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -547,6 +608,23 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
void of_gpio_clk_gate_setup(struct device_node *node);
/**
+ * struct clk_gpio_mux - gpio controlled clock multiplexer
+ *
+ * @hw: see struct clk_gpio
+ * @gpiod: gpio descriptor to select the parent of this clock multiplexer
+ *
+ * Clock with a gpio control for selecting the parent clock.
+ * Implements .get_parent, .set_parent and .determine_rate
+ */
+
+extern const struct clk_ops clk_gpio_mux_ops;
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags);
+
+void of_gpio_mux_clk_setup(struct device_node *node);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -564,31 +642,29 @@ void clk_unregister(struct clk *clk);
void devm_clk_unregister(struct device *dev, struct clk *clk);
/* helper functions */
-const char *__clk_get_name(struct clk *clk);
+const char *__clk_get_name(const struct clk *clk);
+const char *clk_hw_get_name(const struct clk_hw *hw);
struct clk_hw *__clk_get_hw(struct clk *clk);
-u8 __clk_get_num_parents(struct clk *clk);
-struct clk *__clk_get_parent(struct clk *clk);
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
+unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
+ unsigned int index);
unsigned int __clk_get_enable_count(struct clk *clk);
-unsigned long __clk_get_rate(struct clk *clk);
+unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
-bool __clk_is_prepared(struct clk *clk);
+unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+bool clk_hw_is_prepared(const struct clk_hw *hw);
+bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p);
-unsigned long __clk_determine_rate(struct clk_hw *core,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate);
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p);
+int __clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
+int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+ struct clk_rate_request *req);
+void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+ unsigned long max_rate);
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
@@ -599,7 +675,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
/*
* FIXME clock api without lock protection
*/
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
+unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
@@ -624,6 +700,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
int of_clk_get_parent_count(struct device_node *np);
+int of_clk_parent_fill(struct device_node *np, const char **parents,
+ unsigned int size);
const char *of_clk_get_parent_name(struct device_node *np, int index);
void of_clk_init(const struct of_device_id *matches);
@@ -649,6 +727,15 @@ static inline struct clk *of_clk_src_onecell_get(
{
return ERR_PTR(-ENOENT);
}
+static inline int of_clk_get_parent_count(struct device_node *np)
+{
+ return 0;
+}
+static inline int of_clk_parent_fill(struct device_node *np,
+ const char **parents, unsigned int size)
+{
+ return 0;
+}
static inline const char *of_clk_get_parent_name(struct device_node *np,
int index)
{
diff --git a/kernel/include/linux/clk.h b/kernel/include/linux/clk.h
index 68c16a6be..0df4a51e1 100644
--- a/kernel/include/linux/clk.h
+++ b/kernel/include/linux/clk.h
@@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * This answers the question "if I were to pass @rate to clk_set_rate(),
+ * what clock rate would I end up with?" without changing the hardware
+ * in any way. In other words:
+ *
+ * rate = clk_round_rate(clk, r);
+ *
+ * and:
+ *
+ * clk_set_rate(clk, r);
+ * rate = clk_get_rate(clk);
+ *
+ * are equivalent except the former does not modify the clock hardware
+ * in any way.
+ *
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
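A sketch of the idiom this documentation enables: probe the achievable rate first, then commit (the 48 MHz target is illustrative):

static int my_setup_clk(struct clk *clk)
{
        long rate = clk_round_rate(clk, 48000000);

        if (rate <= 0)
                return rate ? (int)rate : -EINVAL;
        return clk_set_rate(clk, rate); /* hardware changes only here */
}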
@@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-/**
- * clk_add_alias - add a new clock alias
- * @alias: name for clock alias
- * @alias_dev_name: device name
- * @id: platform specific clock name
- * @dev: device
- *
- * Allows using generic clock names for drivers by adding a new alias.
- * Assumes clkdev, see clkdev.h for more info.
- */
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
- struct device *dev);
-
struct device_node;
struct of_phandle_args;
diff --git a/kernel/include/linux/clk/at91_pmc.h b/kernel/include/linux/clk/at91_pmc.h
index 7669f7618..17f413bbb 100644
--- a/kernel/include/linux/clk/at91_pmc.h
+++ b/kernel/include/linux/clk/at91_pmc.h
@@ -16,18 +16,6 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_pmc_base;
-
-#define at91_pmc_read(field) \
- readl_relaxed(at91_pmc_base + field)
-
-#define at91_pmc_write(field, value) \
- writel_relaxed(value, at91_pmc_base + field)
-#else
-.extern at91_pmc_base
-#endif
-
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
@@ -164,6 +152,7 @@ extern void __iomem *at91_pmc_base;
#define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */
#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
+#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks Ready */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
@@ -182,13 +171,18 @@ extern void __iomem *at91_pmc_base;
#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
-#define AT91_PMC_PCR_PID (0x3f << 0) /* Peripheral ID */
-#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
-#define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */
-#define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */
-#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */
-#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */
-#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */
-#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
+#define AT91_PMC_PCR_PID_MASK 0x3f
+#define AT91_PMC_PCR_GCKCSS_OFFSET 8
+#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET)
+#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */
+#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
+#define AT91_PMC_PCR_DIV_OFFSET 16
+#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET)
+#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */
+#define AT91_PMC_PCR_GCKDIV_OFFSET 20
+#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET)
+#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */
+#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
+#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
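A sketch composing a PCR write from the new field macros; pid, css and div stand in for caller-chosen values:

u32 pcr = (pid & AT91_PMC_PCR_PID_MASK) |
          AT91_PMC_PCR_GCKCSS(css) |    /* generated-clock source */
          AT91_PMC_PCR_GCKDIV(div) |    /* generated-clock divisor */
          AT91_PMC_PCR_CMD |            /* this access is a write */
          AT91_PMC_PCR_GCKEN;           /* enable the generated clock */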
#endif
diff --git a/kernel/include/linux/clk/clk-conf.h b/kernel/include/linux/clk/clk-conf.h
index f3050e15f..e0c362363 100644
--- a/kernel/include/linux/clk/clk-conf.h
+++ b/kernel/include/linux/clk/clk-conf.h
@@ -7,6 +7,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
struct device_node;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
diff --git a/kernel/include/linux/clk/shmobile.h b/kernel/include/linux/clk/shmobile.h
index 63a8159c4..cb19cc186 100644
--- a/kernel/include/linux/clk/shmobile.h
+++ b/kernel/include/linux/clk/shmobile.h
@@ -16,8 +16,20 @@
#include <linux/types.h>
+struct device;
+struct device_node;
+struct generic_pm_domain;
+
void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+void cpg_mstp_add_clk_domain(struct device_node *np);
+int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
+void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
+#else
+static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
+#endif
+
#endif
diff --git a/kernel/include/linux/clk/tegra.h b/kernel/include/linux/clk/tegra.h
index 19c4208f4..57bf7aab4 100644
--- a/kernel/include/linux/clk/tegra.h
+++ b/kernel/include/linux/clk/tegra.h
@@ -17,7 +17,8 @@
#ifndef __LINUX_CLK_TEGRA_H_
#define __LINUX_CLK_TEGRA_H_
-#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/bug.h>
/*
* Tegra CPU clock and reset control ops
diff --git a/kernel/include/linux/clk/ti.h b/kernel/include/linux/clk/ti.h
index 79b76e13d..223be696d 100644
--- a/kernel/include/linux/clk/ti.h
+++ b/kernel/include/linux/clk/ti.h
@@ -188,33 +188,6 @@ struct clk_hw_omap {
/* DPLL Type and DCO Selection Flags */
#define DPLL_J_TYPE 0x1
-/* Composite clock component types */
-enum {
- CLK_COMPONENT_TYPE_GATE = 0,
- CLK_COMPONENT_TYPE_DIVIDER,
- CLK_COMPONENT_TYPE_MUX,
- CLK_COMPONENT_TYPE_MAX,
-};
-
-/**
- * struct ti_dt_clk - OMAP DT clock alias declarations
- * @lk: clock lookup definition
- * @node_name: clock DT node to map to
- */
-struct ti_dt_clk {
- struct clk_lookup lk;
- char *node_name;
-};
-
-#define DT_CLK(dev, con, name) \
- { \
- .lk = { \
- .dev_id = dev, \
- .con_id = con, \
- }, \
- .node_name = name, \
- }
-
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@ enum {
CLK_MAX_MEMMAPS
};
-typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
-
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@ struct clk_omap_reg {
};
/**
- * struct ti_clk_ll_ops - low-level register access ops for a clock
+ * struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
+ * @clkdm_clk_enable: pointer to clockdomain enable function
+ * @clkdm_clk_disable: pointer to clockdomain disable function
+ * @cm_wait_module_ready: pointer to CM module wait ready function
+ * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
*
- * Low-level register access ops are generally used by the basic clock types
- * (clk-gate, clk-mux, clk-divider etc.) to provide support for various
- * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
- * used by other hardware-specific clock drivers if needed.
+ * Low-level ops are generally used by the basic clock types (clk-gate,
+ * clk-mux, clk-divider etc.) to provide support for various low-level
+ * hardware interfaces (direct MMIO, regmap etc.), and are initialized
+ * by board code. Low-level ops also contain some other platform-specific
+ * operations not provided directly by clock drivers.
*/
struct ti_clk_ll_ops {
u32 (*clk_readl)(void __iomem *reg);
void (*clk_writel)(u32 val, void __iomem *reg);
+ int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
+ int (*clkdm_clk_disable)(struct clockdomain *clkdm,
+ struct clk *clk);
+ int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
+ u8 idlest_shift);
+ int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
+ u8 *idlest_reg_id);
};
-extern struct ti_clk_ll_ops *ti_clk_ll_ops;
-
-extern const struct clk_ops ti_clk_divider_ops;
-extern const struct clk_ops ti_clk_mux_ops;
-
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
-void omap2_init_clk_hw_omap_clocks(struct clk *clk);
-int omap3_noncore_dpll_enable(struct clk_hw *hw);
-void omap3_noncore_dpll_disable(struct clk_hw *hw);
-int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
-int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate);
-int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate,
- u8 index);
-long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk);
-unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
-long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk);
-u8 omap2_init_dpll_parent(struct clk_hw *hw);
-unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
void omap2_init_clk_clkdm(struct clk_hw *clk);
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate);
-long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
-int omap2_clkops_enable_clkdm(struct clk_hw *hw);
-void omap2_clkops_disable_clkdm(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
-void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
-int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
- unsigned long parent_rate);
-int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate, u8 index);
-int omap2_dflt_clk_enable(struct clk_hw *hw);
-void omap2_dflt_clk_disable(struct clk_hw *hw);
-int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
-void omap3_clk_lock_dpll5(void);
+int omap2_clk_enable_autoidle_all(void);
+int omap2_clk_allow_idle(struct clk *clk);
+int omap2_clk_deny_idle(struct clk *clk);
unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
unsigned long parent_rate);
int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
void omap2xxx_clkt_vps_init(void);
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
-void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
-void ti_dt_clocks_register(struct ti_dt_clk *oclks);
-void ti_dt_clk_init_provider(struct device_node *np, int index);
void ti_dt_clk_init_retry_clks(void);
void ti_dt_clockdomains_setup(void);
-int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
- ti_of_clk_init_cb_t func);
-int of_ti_clk_autoidle_setup(struct device_node *node);
-int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
+
+struct regmap;
+
+int omap2_clk_provider_init(struct device_node *parent, int index,
+ struct regmap *syscon, void __iomem *mem);
+void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
int omap3430_dt_clk_init(void);
int omap3630_dt_clk_init(void);
int am35xx_dt_clk_init(void);
-int ti81xx_dt_clk_init(void);
+int dm814x_dt_clk_init(void);
+int dm816x_dt_clk_init(void);
int omap4xxx_dt_clk_init(void);
int omap5xxx_dt_clk_init(void);
int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@ int am43xx_dt_clk_init(void);
int omap2420_dt_clk_init(void);
int omap2430_dt_clk_init(void);
-#ifdef CONFIG_OF
-void of_ti_clk_allow_autoidle_all(void);
-void of_ti_clk_deny_autoidle_all(void);
-#else
-static inline void of_ti_clk_allow_autoidle_all(void) { }
-static inline void of_ti_clk_deny_autoidle_all(void) { }
-#endif
+struct ti_clk_features {
+ u32 flags;
+ long fint_min;
+ long fint_max;
+ long fint_band1_max;
+ long fint_band2_min;
+ u8 dpll_bypass_vals;
+ u8 cm_idlest_val;
+};
+
+#define TI_CLK_DPLL_HAS_FREQSEL BIT(0)
+#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
+#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
+
+void ti_clk_setup_features(struct ti_clk_features *features);
+const struct ti_clk_features *ti_clk_get_features(void);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
-extern const struct clk_hw_omap_ops clkhwops_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
-extern const struct clk_hw_omap_ops clkhwops_iclk;
-extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
#ifdef CONFIG_ATAGS
int omap3430_clk_legacy_init(void);
diff --git a/kernel/include/linux/clkdev.h b/kernel/include/linux/clkdev.h
index 94bad77ee..08bffcc46 100644
--- a/kernel/include/linux/clkdev.h
+++ b/kernel/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
const char *dev_id;
const char *con_id;
struct clk *clk;
+ struct clk_hw *clk_hw;
};
#define CLKDEV_INIT(d, n, c) \
@@ -32,15 +33,19 @@ struct clk_lookup {
}
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...);
+ const char *dev_fmt, ...) __printf(3, 4);
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
+struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...) __printf(3, 4);
+
void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+int clk_add_alias(const char *, const char *, const char *, struct device *);
-int clk_register_clkdev(struct clk *, const char *, const char *, ...);
+int clk_register_clkdev(struct clk *, const char *, const char *, ...)
+ __printf(3, 4);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
#ifdef CONFIG_COMMON_CLK
diff --git a/kernel/include/linux/clockchips.h b/kernel/include/linux/clockchips.h
index 96c280b2c..bdcf358df 100644
--- a/kernel/include/linux/clockchips.h
+++ b/kernel/include/linux/clockchips.h
@@ -18,15 +18,6 @@
struct clock_event_device;
struct module;
-/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
-enum clock_event_mode {
- CLOCK_EVT_MODE_UNUSED,
- CLOCK_EVT_MODE_SHUTDOWN,
- CLOCK_EVT_MODE_PERIODIC,
- CLOCK_EVT_MODE_ONESHOT,
- CLOCK_EVT_MODE_RESUME,
-};
-
/*
* Possible states of a clock event device.
*
@@ -37,12 +28,15 @@ enum clock_event_mode {
* reached from DETACHED or SHUTDOWN.
* ONESHOT: Device is programmed to generate event only once. Can be reached
* from DETACHED or SHUTDOWN.
+ * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
+ * stopped.
*/
enum clock_event_state {
CLOCK_EVT_STATE_DETACHED,
CLOCK_EVT_STATE_SHUTDOWN,
CLOCK_EVT_STATE_PERIODIC,
CLOCK_EVT_STATE_ONESHOT,
+ CLOCK_EVT_STATE_ONESHOT_STOPPED,
};
/*
@@ -83,15 +77,14 @@ enum clock_event_state {
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
- * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
- * @state: current state of the device, assigned by the core code
+ * @state_use_accessors: current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
- * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
- * @set_state_periodic: switch state to periodic, if !set_mode
- * @set_state_oneshot: switch state to oneshot, if !set_mode
- * @set_state_shutdown: switch state to shutdown, if !set_mode
- * @tick_resume: resume clkevt device, if !set_mode
+ * @set_state_periodic: switch state to periodic
+ * @set_state_oneshot: switch state to oneshot
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped
+ * @set_state_shutdown: switch state to shutdown
+ * @tick_resume: resume clkevt device
* @broadcast: function to broadcast events
* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -112,20 +105,13 @@ struct clock_event_device {
u64 min_delta_ns;
u32 mult;
u32 shift;
- enum clock_event_mode mode;
- enum clock_event_state state;
+ enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
- /*
- * State transition callback(s): Only one of the two groups should be
- * defined:
- * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
- * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
- */
- void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
+ int (*set_state_oneshot_stopped)(struct clock_event_device *);
int (*set_state_shutdown)(struct clock_event_device *);
int (*tick_resume)(struct clock_event_device *);
@@ -144,6 +130,32 @@ struct clock_event_device {
struct module *owner;
} ____cacheline_aligned;
+/* Helpers to verify state of a clockevent device */
+static inline bool clockevent_state_detached(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
+}
+
+static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
+}
+
+static inline bool clockevent_state_periodic(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
+}
+
+static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
+}
+
+static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
/*
* Calculate a multiplication factor for scaled math, which is used to convert
* nanoseconds based values to clock ticks:
@@ -203,13 +215,10 @@ static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
-extern int clockevents_notify(unsigned long reason, void *arg);
-
#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
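With set_mode() and clockevents_notify() gone, a driver fills in the per-state callbacks and the core keeps the current state in state_use_accessors, which is only meant to be read through the clockevent_state_*() helpers. A sketch under those assumptions; the dummy_* names are hypothetical:

#include <linux/clockchips.h>

static int dummy_set_oneshot(struct clock_event_device *evt)
{
	/* program the (hypothetical) timer hardware for one-shot mode */
	return 0;
}

static int dummy_shutdown(struct clock_event_device *evt)
{
	/* quiesce the (hypothetical) timer hardware */
	return 0;
}

static struct clock_event_device dummy_clkevt = {
	.name			= "dummy",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_state_oneshot	= dummy_set_oneshot,
	.set_state_shutdown	= dummy_shutdown,
};

static bool dummy_is_parked(void)
{
	/* Read the state via the accessor, never ->state_use_accessors. */
	return clockevent_state_oneshot_stopped(&dummy_clkevt);
}

The new ONESHOT_STOPPED state lets the core park a oneshot device between events instead of shutting it down outright.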
diff --git a/kernel/include/linux/clocksource.h b/kernel/include/linux/clocksource.h
index d27d01522..7784b597e 100644
--- a/kernel/include/linux/clocksource.h
+++ b/kernel/include/linux/clocksource.h
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
@@ -247,16 +246,13 @@ extern int clocksource_i8253_init(void);
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1(clksrc, name, compat, fn)
-#ifdef CONFIG_CLKSRC_OF
-extern void clocksource_of_init(void);
+#ifdef CONFIG_CLKSRC_PROBE
+extern void clocksource_probe(void);
#else
-static inline void clocksource_of_init(void) {}
+static inline void clocksource_probe(void) {}
#endif
-#ifdef CONFIG_ACPI
-void acpi_generic_timer_init(void);
-#else
-static inline void acpi_generic_timer_init(void) { }
-#endif
+#define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn) \
+ ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn)
#endif /* _LINUX_CLOCKSOURCE_H */
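The rename from clocksource_of_init() to clocksource_probe() reflects that the same pass now also runs ACPI probe entries registered with CLOCKSOURCE_ACPI_DECLARE(). A sketch of the DT side, with illustrative names:

/* Sketch: "vendor,my-timer" and my_timer_init are illustrative. */
static void __init my_timer_init(struct device_node *np)
{
	/* map registers, request the irq, register the clockevent ... */
}
CLOCKSOURCE_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);

An architecture's time_init() then calls clocksource_probe() once to invoke every matching DT or ACPI init function.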
diff --git a/kernel/include/linux/cma.h b/kernel/include/linux/cma.h
index f7ef093ec..29f9e774a 100644
--- a/kernel/include/linux/cma.h
+++ b/kernel/include/linux/cma.h
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
diff --git a/kernel/include/linux/com20020.h b/kernel/include/linux/com20020.h
deleted file mode 100644
index 85898995b..000000000
--- a/kernel/include/linux/com20020.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Linux ARCnet driver - COM20020 chipset support - function declarations
- *
- * Written 1997 by David Woodhouse.
- * Written 1994-1999 by Avery Pennarun.
- * Derived from skeleton.c by Donald Becker.
- *
- * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
- * for sponsoring the further development of this driver.
- *
- * **********************
- *
- * The original copyright of skeleton.c was as follows:
- *
- * skeleton.c Written 1993 by Donald Becker.
- * Copyright 1993 United States Government as represented by the
- * Director, National Security Agency. This software may only be used
- * and distributed according to the terms of the GNU General Public License as
- * modified by SRC, incorporated herein by reference.
- *
- * **********************
- *
- * For more details, see drivers/net/arcnet.c
- *
- * **********************
- */
-#ifndef __COM20020_H
-#define __COM20020_H
-
-int com20020_check(struct net_device *dev);
-int com20020_found(struct net_device *dev, int shared);
-extern const struct net_device_ops com20020_netdev_ops;
-
-/* The number of low I/O ports used by the card. */
-#define ARCNET_TOTAL_SIZE 8
-
-/* various register addresses */
-#ifdef CONFIG_SA1100_CT6001
-#define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */
-#else
-#define BUS_ALIGN 1
-#endif
-
-#define PLX_PCI_MAX_CARDS 2
-
-struct com20020_pci_channel_map {
- u32 bar;
- u32 offset;
- u32 size; /* 0x00 - auto, e.g. length of entire bar */
-};
-
-struct com20020_pci_card_info {
- const char *name;
- int devcount;
-
- struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
-
- unsigned int flags;
-};
-
-struct com20020_priv {
- struct com20020_pci_card_info *ci;
- struct list_head list_dev;
-};
-
-struct com20020_dev {
- struct list_head list;
- struct net_device *dev;
-
- struct com20020_priv *pci_priv;
- int index;
-};
-
-#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
-#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
-#define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */
-#define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */
-#define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */
-#define _ADDR_LO (ioaddr+BUS_ALIGN*3)
-#define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */
-#define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */
-#define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */
-#define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG
- or _SUBADR) */
-
-/* in the ADDR_HI register */
-#define RDDATAflag 0x80 /* next access is a read (not a write) */
-
-/* in the DIAGSTAT register */
-#define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */
-
-/* in the CONFIG register */
-#define RESETcfg 0x80 /* put card in reset state */
-#define TXENcfg 0x20 /* enable TX */
-
-/* in SETUP register */
-#define PROMISCset 0x10 /* enable RCV_ALL */
-#define P1MODE 0x80 /* enable P1-MODE for Backplane */
-#define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */
-
-/* COM2002x */
-#define SUB_TENTATIVE 0 /* tentative node ID */
-#define SUB_NODE 1 /* node ID */
-#define SUB_SETUP1 2 /* various options */
-#define SUB_TEST 3 /* test/diag register */
-
-/* COM20022 only */
-#define SUB_SETUP2 4 /* sundry options */
-#define SUB_BUSCTL 5 /* bus control options */
-#define SUB_DMACOUNT 6 /* DMA count options */
-
-#define SET_SUBADR(x) do { \
- if ((x) < 4) \
- { \
- lp->config = (lp->config & ~0x03) | (x); \
- SETCONF; \
- } \
- else \
- { \
- outb(x, _SUBADR); \
- } \
-} while (0)
-
-#undef ARCRESET
-#undef ASTATUS
-#undef ACOMMAND
-#undef AINTMASK
-
-#define ARCRESET { outb(lp->config | 0x80, _CONFIG); \
- udelay(5); \
- outb(lp->config , _CONFIG); \
- }
-#define ARCRESET0 { outb(0x18 | 0x80, _CONFIG); \
- udelay(5); \
- outb(0x18 , _CONFIG); \
- }
-
-#define ASTATUS() inb(_STATUS)
-#define ADIAGSTATUS() inb(_DIAGSTAT)
-#define ACOMMAND(cmd) outb((cmd),_COMMAND)
-#define AINTMASK(msk) outb((msk),_INTMASK)
-
-#define SETCONF outb(lp->config, _CONFIG)
-
-#endif /* __COM20020_H */
diff --git a/kernel/include/linux/compaction.h b/kernel/include/linux/compaction.h
index aa8f61cf3..4cd4ddf64 100644
--- a/kernel/include/linux/compaction.h
+++ b/kernel/include/linux/compaction.h
@@ -15,7 +15,8 @@
/* For more detailed tracepoint output */
#define COMPACT_NO_SUITABLE_PAGE 5
#define COMPACT_NOT_SUITABLE_ZONE 6
-/* When adding new state, please change compaction_status_string, too */
+#define COMPACT_CONTENDED 7
+/* When adding new states, please adjust include/trace/events/compaction.h */
/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
diff --git a/kernel/include/linux/compat.h b/kernel/include/linux/compat.h
index ab2581469..a76c9172b 100644
--- a/kernel/include/linux/compat.h
+++ b/kernel/include/linux/compat.h
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-extern int compat_printk(const char *fmt, ...);
+extern __printf(1, 2) int compat_printk(const char *fmt, ...);
extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
diff --git a/kernel/include/linux/compiler-gcc.h b/kernel/include/linux/compiler-gcc.h
index 371e560d1..22ab246fe 100644
--- a/kernel/include/linux/compiler-gcc.h
+++ b/kernel/include/linux/compiler-gcc.h
@@ -5,9 +5,9 @@
/*
* Common definitions for all gcc versions go here.
*/
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
/* Optimization barrier */
@@ -46,55 +46,63 @@
* the inline assembly constraint from =g to =r, in this particular
* case either is valid.
*/
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
- (typeof(ptr)) (__ptr + (off)); })
+#define RELOC_HIDE(ptr, off) \
+({ \
+ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); \
+})
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#ifdef __CHECKER__
-#define __must_be_array(arr) 0
+#define __must_be_array(a) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old:
*/
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline inline __attribute__((always_inline)) notrace
-# define __inline__ __inline__ __attribute__((always_inline)) notrace
-# define __inline __inline __attribute__((always_inline)) notrace
+#define inline inline __attribute__((always_inline)) notrace
+#define __inline__ __inline__ __attribute__((always_inline)) notrace
+#define __inline __inline __attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
-# define inline inline notrace
-# define __inline__ __inline__ notrace
-# define __inline __inline notrace
+#define inline inline notrace
+#define __inline__ __inline__ notrace
+#define __inline __inline notrace
#endif
-#define __deprecated __attribute__((deprecated))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
-#define __alias(symbol) __attribute__((alias(#symbol)))
+#define __always_inline inline __attribute__((always_inline))
+#define noinline __attribute__((noinline))
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+#define __alias(symbol) __attribute__((alias(#symbol)))
/*
- * it doesn't make sense on ARM (currently the only user of __naked) to trace
- * naked functions because then mcount is called without stack and frame pointer
- * being set up and there is no chance to restore the lr register to the value
- * before mcount was called.
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling
+ * conventions, therefore they must be noinline and noclone.
*
- * The asm() bodies of naked functions often depend on standard calling conventions,
- * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
- * this, so we must do so ourselves. See GCC PR44290.
+ * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
+ * See GCC PR44290.
*/
-#define __naked __attribute__((naked)) noinline __noclone notrace
+#define __naked __attribute__((naked)) noinline __noclone notrace
-#define __noreturn __attribute__((noreturn))
+#define __noreturn __attribute__((noreturn))
/*
* From the GCC manual:
@@ -106,28 +114,170 @@
* would be.
* [...]
*/
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define noinline __attribute__((noinline))
-#define __attribute_const__ __attribute__((__const__))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __printf(a, b) __attribute__((format(printf, a, b)))
+#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used __attribute__((__unused__))
+#else
+# define __used __attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __compiler_offsetof(a, b) \
+ __builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely. This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * When used with Link Time Optimization, gcc can optimize away C functions or
+ * variables which are referenced only from assembly code. __visible tells the
+ * optimizer that something else uses this function or variable, thus preventing
+ * this.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
+
+#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
+/*
+ * __assume_aligned(n, k): Tell the optimizer that the returned
+ * pointer can be assumed to be k modulo n. The second argument is
+ * optional (default 0), so we use a variadic macro to make the
+ * shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ */
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 50000
+#define KASAN_ABI_VERSION 4
+#elif GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
+
+#if GCC_VERSION >= 40902
+/*
+ * Tell the compiler that address safety instrumentation (KASAN)
+ * should not be applied to that function.
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ */
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#endif
+
+#endif /* gcc version >= 40000 specific checks */
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
+#if !defined(__no_sanitize_address)
+#define __no_sanitize_address
+#endif
+
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
-
-#define __always_inline inline __attribute__((always_inline))
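Folding compiler-gcc[345].h into one file makes the version checks linear, and adds __assume_aligned() for gcc >= 4.9. A sketch of its use on an allocator-style function; the names are hypothetical and the 64-byte alignment must genuinely hold:

#include <linux/slab.h>

/* Promise callers a 64-byte aligned result so gcc can vectorize them. */
static void *my_alloc_aligned(size_t size) __assume_aligned(64);

static void *my_alloc_aligned(size_t size)
{
	/* assumes the backing allocator really returns 64-byte
	 * aligned memory for this size class */
	return kmalloc(ALIGN(size, 64), GFP_KERNEL);
}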
diff --git a/kernel/include/linux/compiler-gcc3.h b/kernel/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89febe4..000000000
--- a/kernel/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION >= 30300
-# define __used __attribute__((__used__))
-#else
-# define __used __attribute__((__unused__))
-#endif
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
diff --git a/kernel/include/linux/compiler-gcc4.h b/kernel/include/linux/compiler-gcc4.h
deleted file mode 100644
index 769e19864..000000000
--- a/kernel/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,91 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
-#endif
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
-
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
-
-#if GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
-#endif
diff --git a/kernel/include/linux/compiler-gcc5.h b/kernel/include/linux/compiler-gcc5.h
deleted file mode 100644
index efee49371..000000000
--- a/kernel/include/linux/compiler-gcc5.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
-
-#define KASAN_ABI_VERSION 4
diff --git a/kernel/include/linux/compiler.h b/kernel/include/linux/compiler.h
index 867722591..6fc9a6dd5 100644
--- a/kernel/include/linux/compiler.h
+++ b/kernel/include/linux/compiler.h
@@ -17,6 +17,7 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
+# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
@@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
+# define __pmem
#endif
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -54,7 +56,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#include <linux/compiler-gcc.h>
#endif
-#ifdef CC_USING_HOTPATCH
+#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
@@ -142,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
- if (__builtin_constant_p((cond)) ? !!(cond) : \
+ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
@@ -196,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#include <uapi/linux/types.h>
-static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+#define __READ_ONCE_SIZE \
+({ \
+ switch (size) { \
+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
+ default: \
+ barrier(); \
+ __builtin_memcpy((void *)res, (const void *)p, size); \
+ barrier(); \
+ } \
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
{
- switch (size) {
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
- default:
- barrier();
- __builtin_memcpy((void *)res, (const void *)p, size);
- barrier();
- }
+ __READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempting to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+ __READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+ __READ_ONCE_SIZE;
}
+#endif
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
@@ -246,11 +274,30 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* required ordering.
*/
-#define READ_ONCE(x) \
- ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+#define __READ_ONCE(x, check) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u; \
+ if (check) \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ else \
+ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
#define WRITE_ONCE(x, val) \
- ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(x)) (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
#endif /* __KERNEL__ */
@@ -370,6 +417,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define __visible
#endif
+/*
+ * Assume alignment of return value.
+ */
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
+#endif
+
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -450,13 +505,28 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
- * If possible use READ_ONCE/ASSIGN_ONCE instead.
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+ typeof(p) _________p1 = READ_ONCE(p); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ (_________p1); \
+})
+
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
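The compiler.h changes are easiest to see side by side: READ_ONCE()/WRITE_ONCE() for single non-tearing accesses, READ_ONCE_NOCHECK() for the same load without KASAN instrumentation, and lockless_dereference() for pointer publication outside RCU. A sketch, assuming hypothetical globals:

/* Sketch only: 'cur_conf' and 'threshold' are illustrative. */
struct conf { int threshold; };
static struct conf *cur_conf;

static void publish(struct conf *newc)
{
	/* Order the initialization of *newc before the pointer becomes
	 * visible; pairs with lockless_dereference() in readers. */
	smp_store_release(&cur_conf, newc);
}

static int read_threshold(void)
{
	struct conf *c = lockless_dereference(cur_conf);

	return c ? READ_ONCE(c->threshold) : 0;
}

READ_ONCE_NOCHECK() is reserved for code such as stack unwinders that must legitimately read outside object bounds without tripping KASAN.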
diff --git a/kernel/include/linux/completion.h b/kernel/include/linux/completion.h
index 3fe8d14c9..3bca1590e 100644
--- a/kernel/include/linux/completion.h
+++ b/kernel/include/linux/completion.h
@@ -7,7 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
*/
-#include <linux/wait-simple.h>
+#include <linux/swait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -23,11 +23,11 @@
*/
struct completion {
unsigned int done;
- struct swait_head wait;
+ struct swait_queue_head wait;
};
#define COMPLETION_INITIALIZER(work) \
- { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
@@ -72,7 +72,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_swait_head(&x->wait);
+ init_swait_queue_head(&x->wait);
}
/**
diff --git a/kernel/include/linux/configfs.h b/kernel/include/linux/configfs.h
index 34025df61..758a02901 100644
--- a/kernel/include/linux/configfs.h
+++ b/kernel/include/linux/configfs.h
@@ -64,14 +64,14 @@ struct config_item {
struct dentry *ci_dentry;
};
-extern int config_item_set_name(struct config_item *, const char *, ...);
+extern __printf(2, 3)
+int config_item_set_name(struct config_item *, const char *, ...);
static inline char *config_item_name(struct config_item * item)
{
return item->ci_name;
}
-extern void config_item_init(struct config_item *);
extern void config_item_init_type_name(struct config_item *item,
const char *name,
struct config_item_type *type);
@@ -125,86 +125,33 @@ struct configfs_attribute {
const char *ca_name;
struct module *ca_owner;
umode_t ca_mode;
+ ssize_t (*show)(struct config_item *, char *);
+ ssize_t (*store)(struct config_item *, const char *, size_t);
};
-/*
- * Users often need to create attribute structures for their configurable
- * attributes, containing a configfs_attribute member and function pointers
- * for the show() and store() operations on that attribute. If they don't
- * need anything else on the extended attribute structure, they can use
- * this macro to define it The argument _item is the name of the
- * config_item structure.
- */
-#define CONFIGFS_ATTR_STRUCT(_item) \
-struct _item##_attribute { \
- struct configfs_attribute attr; \
- ssize_t (*show)(struct _item *, char *); \
- ssize_t (*store)(struct _item *, const char *, size_t); \
+#define CONFIGFS_ATTR(_pfx, _name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
+ .store = _pfx##_name##_store, \
}
-/*
- * With the extended attribute structure, users can use this macro
- * (similar to sysfs' __ATTR) to make defining attributes easier.
- * An example:
- * #define MYITEM_ATTR(_name, _mode, _show, _store) \
- * struct myitem_attribute childless_attr_##_name = \
- * __CONFIGFS_ATTR(_name, _mode, _show, _store)
- */
-#define __CONFIGFS_ATTR(_name, _mode, _show, _store) \
-{ \
- .attr = { \
- .ca_name = __stringify(_name), \
- .ca_mode = _mode, \
- .ca_owner = THIS_MODULE, \
- }, \
- .show = _show, \
- .store = _store, \
-}
-/* Here is a readonly version, only requiring a show() operation */
-#define __CONFIGFS_ATTR_RO(_name, _show) \
-{ \
- .attr = { \
- .ca_name = __stringify(_name), \
- .ca_mode = 0444, \
- .ca_owner = THIS_MODULE, \
- }, \
- .show = _show, \
+#define CONFIGFS_ATTR_RO(_pfx, _name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = S_IRUGO, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
}
-/*
- * With these extended attributes, the simple show_attribute() and
- * store_attribute() operations need to call the show() and store() of the
- * attributes. This is a common pattern, so we provide a macro to define
- * them. The argument _item is the name of the config_item structure.
- * This macro expects the attributes to be named "struct <name>_attribute"
- * and the function to_<name>() to exist;
- */
-#define CONFIGFS_ATTR_OPS(_item) \
-static ssize_t _item##_attr_show(struct config_item *item, \
- struct configfs_attribute *attr, \
- char *page) \
-{ \
- struct _item *_item = to_##_item(item); \
- struct _item##_attribute *_item##_attr = \
- container_of(attr, struct _item##_attribute, attr); \
- ssize_t ret = 0; \
- \
- if (_item##_attr->show) \
- ret = _item##_attr->show(_item, page); \
- return ret; \
-} \
-static ssize_t _item##_attr_store(struct config_item *item, \
- struct configfs_attribute *attr, \
- const char *page, size_t count) \
-{ \
- struct _item *_item = to_##_item(item); \
- struct _item##_attribute *_item##_attr = \
- container_of(attr, struct _item##_attribute, attr); \
- ssize_t ret = -EINVAL; \
- \
- if (_item##_attr->store) \
- ret = _item##_attr->store(_item, page, count); \
- return ret; \
+#define CONFIGFS_ATTR_WO(_pfx, _name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = S_IWUSR, \
+ .ca_owner = THIS_MODULE, \
+ .store = _pfx##_name##_store, \
}
/*
@@ -223,8 +170,6 @@ static ssize_t _item##_attr_store(struct config_item *item, \
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
- ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *);
- ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t);
int (*allow_link)(struct config_item *src, struct config_item *target);
int (*drop_link)(struct config_item *src, struct config_item *target);
};
@@ -252,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
int configfs_register_subsystem(struct configfs_subsystem *subsys);
void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+int configfs_register_group(struct config_group *parent_group,
+ struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+ const char *name,
+ struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
/* These functions can sleep and can alloc with GFP_KERNEL */
/* WARNING: These cannot be called underneath configfs callbacks!! */
int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
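The configfs conversion removes the per-subsystem boilerplate: show()/store() now live on configfs_attribute itself, so CONFIGFS_ATTR_STRUCT()/CONFIGFS_ATTR_OPS() and the item-level show_attribute()/store_attribute() hooks disappear. The new pattern, sketched with illustrative names:

static ssize_t childless_showme_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", 42);	/* report some item state */
}

static ssize_t childless_showme_store(struct config_item *item,
				      const char *page, size_t count)
{
	/* parse 'page', update the item, return bytes consumed */
	return count;
}

CONFIGFS_ATTR(childless_, showme);	/* emits childless_attr_showme */

static struct configfs_attribute *childless_attrs[] = {
	&childless_attr_showme,
	NULL,
};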
diff --git a/kernel/include/linux/console.h b/kernel/include/linux/console.h
index 9f50fb413..ea731af24 100644
--- a/kernel/include/linux/console.h
+++ b/kernel/include/linux/console.h
@@ -115,6 +115,7 @@ static inline int con_debug_leave(void)
#define CON_BOOT (8)
#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
#define CON_BRL (32) /* Used for a braille device */
+#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
struct console {
char name[16];
@@ -149,6 +150,7 @@ extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
+extern void console_flush_on_panic(void);
extern struct tty_driver *console_device(int *);
extern void console_stop(struct console *);
extern void console_start(struct console *);
diff --git a/kernel/include/linux/console_struct.h b/kernel/include/linux/console_struct.h
index e859c98d1..e329ee266 100644
--- a/kernel/include/linux/console_struct.h
+++ b/kernel/include/linux/console_struct.h
@@ -104,6 +104,7 @@ struct vc_data {
unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
+ unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
diff --git a/kernel/include/linux/context_tracking.h b/kernel/include/linux/context_tracking.h
index 282183825..68b575afe 100644
--- a/kernel/include/linux/context_tracking.h
+++ b/kernel/include/linux/context_tracking.h
@@ -10,23 +10,25 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
+/* Called with interrupts disabled. */
+extern void __context_tracking_enter(enum ctx_state state);
+extern void __context_tracking_exit(enum ctx_state state);
+
extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
-extern void __context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next);
static inline void user_enter(void)
{
if (context_tracking_is_enabled())
- context_tracking_user_enter();
+ context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
if (context_tracking_is_enabled())
- context_tracking_user_exit();
+ context_tracking_exit(CONTEXT_USER);
}
static inline enum ctx_state exception_enter(void)
@@ -51,21 +53,28 @@ static inline void exception_exit(enum ctx_state prev_ctx)
}
}
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next)
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static inline enum ctx_state ct_state(void)
{
- if (context_tracking_is_enabled())
- __context_tracking_task_switch(prev, next);
+ return context_tracking_is_enabled() ?
+ this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next) { }
+static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
@@ -83,13 +92,13 @@ static inline void guest_enter(void)
current->flags |= PF_VCPU;
if (context_tracking_is_enabled())
- context_tracking_enter(CONTEXT_GUEST);
+ __context_tracking_enter(CONTEXT_GUEST);
}
static inline void guest_exit(void)
{
if (context_tracking_is_enabled())
- context_tracking_exit(CONTEXT_GUEST);
+ __context_tracking_exit(CONTEXT_GUEST);
if (vtime_accounting_enabled())
vtime_guest_exit(current);
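ct_state() plus CT_WARN_ON() give entry code a cheap assertion that context tracking agrees with reality, while guest_enter()/guest_exit() switch to the __-prefixed variants because KVM already calls them with interrupts disabled. A sketch of the assertion as syscall-entry code might use it; the function name is illustrative:

static void enter_from_user_mode(void)
{
	/* If tracking is enabled, we must still be in CONTEXT_USER here. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}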
diff --git a/kernel/include/linux/context_tracking_state.h b/kernel/include/linux/context_tracking_state.h
index 6b7b96a32..ee956c528 100644
--- a/kernel/include/linux/context_tracking_state.h
+++ b/kernel/include/linux/context_tracking_state.h
@@ -12,7 +12,9 @@ struct context_tracking {
* may be further optimized using static keys.
*/
bool active;
+ int recursion;
enum ctx_state {
+ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
diff --git a/kernel/include/linux/coresight.h b/kernel/include/linux/coresight.h
index 3486b9082..a7cabfa23 100644
--- a/kernel/include/linux/coresight.h
+++ b/kernel/include/linux/coresight.h
@@ -14,6 +14,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/sched.h>
/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -206,7 +207,7 @@ struct coresight_ops_link {
* Operations available for sources.
* @trace_id: returns the value of the component's trace ID as known
 *		to the HW.
- * @enable: enables tracing from a source.
+ * @enable: enables tracing for a source.
* @disable: disables tracing for a source.
*/
struct coresight_ops_source {
@@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
+#ifdef CONFIG_PID_NS
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid)
+{
+ struct task_struct *task = NULL;
+ unsigned long pid = 0;
+
+ rcu_read_lock();
+ task = find_task_by_vpid(vpid);
+ if (task)
+ pid = task_pid_nr(task);
+ rcu_read_unlock();
+
+ return pid;
+}
+#else
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
+#endif
+
#endif
diff --git a/kernel/include/linux/count_zeros.h b/kernel/include/linux/count_zeros.h
new file mode 100644
index 000000000..363da78c4
--- /dev/null
+++ b/kernel/include/linux/count_zeros.h
@@ -0,0 +1,57 @@
+/* Count leading and trailing zeros functions
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_BITOPS_COUNT_ZEROS_H_
+#define _LINUX_BITOPS_COUNT_ZEROS_H_
+
+#include <asm/bitops.h>
+
+/**
+ * count_leading_zeros - Count the number of zeros from the MSB back
+ * @x: The value
+ *
+ * Count the number of leading zeros from the MSB going towards the LSB in @x.
+ *
+ * If the MSB of @x is set, the result is 0.
+ * If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
+ * If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
+ */
+static inline int count_leading_zeros(unsigned long x)
+{
+ if (sizeof(x) == 4)
+ return BITS_PER_LONG - fls(x);
+ else
+ return BITS_PER_LONG - fls64(x);
+}
+
+#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG
+
+/**
+ * count_trailing_zeros - Count the number of zeros from the LSB forwards
+ * @x: The value
+ *
+ * Count the number of trailing zeros from the LSB going towards the MSB in @x.
+ *
+ * If the LSB of @x is set, the result is 0.
+ * If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
+ * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
+ */
+static inline int count_trailing_zeros(unsigned long x)
+{
+#define COUNT_TRAILING_ZEROS_0 (-1)
+
+ if (sizeof(x) == 4)
+ return ffs(x) - 1;	/* ffs(0) == 0, so x == 0 yields COUNT_TRAILING_ZEROS_0 */
+ else
+ return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
+}
+
+#endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */
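Given the documented contract, a few sanity checks, sketched for a 64-bit kernel:

#include <linux/bug.h>
#include <linux/count_zeros.h>

static void count_zeros_selftest(void)
{
	WARN_ON(count_leading_zeros(1UL) != BITS_PER_LONG - 1);
	WARN_ON(count_trailing_zeros(0x8UL) != 3);	/* bit 3 lowest set */
	WARN_ON(count_leading_zeros(0UL) != COUNT_LEADING_ZEROS_0);
	WARN_ON(count_trailing_zeros(0UL) != COUNT_TRAILING_ZEROS_0);
}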
diff --git a/kernel/include/linux/cpu.h b/kernel/include/linux/cpu.h
index 2a22c7c72..94041d803 100644
--- a/kernel/include/linux/cpu.h
+++ b/kernel/include/linux/cpu.h
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
-extern struct device *cpu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
+extern __printf(4, 5)
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
@@ -227,7 +228,6 @@ extern struct bus_type cpu_subsys;
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
-extern bool try_get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -247,7 +247,6 @@ int cpu_down(unsigned int cpu);
static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
-#define try_get_online_cpus() true
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
diff --git a/kernel/include/linux/cpu_cooling.h b/kernel/include/linux/cpu_cooling.h
index bd955270d..c156f5082 100644
--- a/kernel/include/linux/cpu_cooling.h
+++ b/kernel/include/linux/cpu_cooling.h
@@ -28,6 +28,9 @@
#include <linux/thermal.h>
#include <linux/cpumask.h>
+typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
+ unsigned long voltage, u32 *power);
+
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -36,6 +39,10 @@
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus);
+struct thermal_cooling_device *
+cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+ u32 capacitance, get_static_t plat_static_func);
+
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
@@ -45,6 +52,12 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus);
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus);
+
+struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
@@ -52,6 +65,15 @@ of_cpufreq_cooling_register(struct device_node *np,
{
return ERR_PTR(-ENOSYS);
}
+
+static inline struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func)
+{
+ return NULL;
+}
#endif
/**
@@ -68,11 +90,28 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
+cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+ u32 capacitance, get_static_t plat_static_func)
+{
+ return NULL;
+}
+
+static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
return ERR_PTR(-ENOSYS);
}
+
+static inline struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func)
+{
+ return NULL;
+}
+
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
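The *_power_cooling_register() variants extend cpufreq cooling with a power model: capacitance feeds the dynamic-power estimate and the optional get_static_t callback supplies static power. A registration sketch; every name and the coefficient are illustrative:

static int my_static_power(cpumask_t *cpumask, int interval,
			   unsigned long voltage, u32 *power)
{
	*power = 0;	/* platform with negligible leakage */
	return 0;
}

static struct thermal_cooling_device *my_cdev;

static int my_register_cooling(struct device_node *np)
{
	/* 530 stands in for the platform's dynamic-power coefficient */
	my_cdev = of_cpufreq_power_cooling_register(np, cpu_online_mask,
						    530, my_static_power);
	return PTR_ERR_OR_ZERO(my_cdev);
}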
diff --git a/kernel/include/linux/cpufeature.h b/kernel/include/linux/cpufeature.h
index c4d4eb8ac..986c06c88 100644
--- a/kernel/include/linux/cpufeature.h
+++ b/kernel/include/linux/cpufeature.h
@@ -11,6 +11,7 @@
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/cpufeature.h>
@@ -43,16 +44,16 @@
* For a list of legal values for 'feature', please consult the file
* 'asm/cpufeature.h' of your favorite architecture.
*/
-#define module_cpu_feature_match(x, __init) \
+#define module_cpu_feature_match(x, __initfunc) \
static struct cpu_feature const cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
-static int cpu_feature_match_ ## x ## _init(void) \
+static int __init cpu_feature_match_ ## x ## _init(void) \
{ \
if (!cpu_have_feature(cpu_feature(x))) \
return -ENODEV; \
- return __init(); \
+ return __initfunc(); \
} \
module_init(cpu_feature_match_ ## x ## _init)
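Renaming the macro parameter from __init to __initfunc stops it from colliding with the __init section marker, which in turn lets the generated wrapper itself be marked __init. Callers are unchanged; an illustrative module:

static int __init my_crc32_mod_init(void)
{
	/* register the CRC32-accelerated driver, sketched away here */
	return 0;
}
module_cpu_feature_match(CRC32, my_crc32_mod_init);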
diff --git a/kernel/include/linux/cpufreq.h b/kernel/include/linux/cpufreq.h
index 2ee4888c1..177c7680c 100644
--- a/kernel/include/linux/cpufreq.h
+++ b/kernel/include/linux/cpufreq.h
@@ -51,21 +51,21 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
- unsigned int policy; /* see above */
- struct cpufreq_governor *governor; /* see below */
};
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
cpumask_var_t related_cpus; /* Online + Offline CPUs */
+ cpumask_var_t real_cpus; /* Related and present */
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
- unsigned int cpu; /* cpu nr of CPU managing this policy */
+ unsigned int cpu; /* cpu managing this policy, must be online */
+
struct clk *clk;
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -77,14 +77,16 @@ struct cpufreq_policy {
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
+ unsigned int last_policy; /* policy before unplug */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
+ char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_real_policy user_policy;
+ struct cpufreq_user_policy user_policy;
struct cpufreq_frequency_table *freq_table;
struct list_head policy_list;
@@ -125,9 +127,14 @@ struct cpufreq_policy {
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
#ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
+static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+ return NULL;
+}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;
@@ -142,10 +149,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
-int cpufreq_get_global_kobject(void);
-void cpufreq_put_global_kobject(void);
-int cpufreq_sysfs_create_file(const struct attribute *attr);
-void cpufreq_sysfs_remove_file(const struct attribute *attr);
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
@@ -365,11 +368,10 @@ static inline void cpufreq_resume(void) {}
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_INCOMPATIBLE (1)
-#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_START (3)
-#define CPUFREQ_CREATE_POLICY (4)
-#define CPUFREQ_REMOVE_POLICY (5)
+#define CPUFREQ_NOTIFY (1)
+#define CPUFREQ_START (2)
+#define CPUFREQ_CREATE_POLICY (3)
+#define CPUFREQ_REMOVE_POLICY (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -574,6 +576,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_supported(void);
int cpufreq_boost_enabled(void);
+int cpufreq_enable_boost_support(void);
+bool policy_has_boost_freq(struct cpufreq_policy *policy);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -587,12 +591,23 @@ static inline int cpufreq_boost_enabled(void)
{
return 0;
}
+
+static inline int cpufreq_enable_boost_support(void)
+{
+ return -EINVAL;
+}
+
+static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+ return false;
+}
#endif
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
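policy_has_boost_freq() and cpufreq_enable_boost_support() let a driver opt into boost when its frequency table contains boost entries, exporting them through the new scaling_boost_freqs attribute. A sketch of an ->init() callback doing so; my_cpufreq_attr is illustrative:

static struct freq_attr *my_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* slot for scaling_boost_freqs */
	NULL,
};

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	if (policy_has_boost_freq(policy)) {
		int ret = cpufreq_enable_boost_support();

		if (ret)
			return ret;
		my_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}
	return 0;
}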
diff --git a/kernel/include/linux/cpuidle.h b/kernel/include/linux/cpuidle.h
index 9c5e89254..786ad3263 100644
--- a/kernel/include/linux/cpuidle.h
+++ b/kernel/include/linux/cpuidle.h
@@ -84,7 +84,6 @@ struct cpuidle_device {
struct list_head device_list;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
- int safe_state_index;
cpumask_t coupled_cpus;
struct cpuidle_coupled *coupled;
#endif
@@ -151,10 +150,6 @@ extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
-extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
@@ -190,16 +185,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+ struct cpuidle_device *dev) {return NULL; }
+#endif
+
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
- struct cpuidle_device *dev) {return NULL; }
#endif
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
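
With the hunk above, cpuidle_find_deepest_state() and cpuidle_enter_freeze() are only built when both CONFIG_CPU_IDLE and CONFIG_SUSPEND are set; -ENODEV stubs cover every other configuration. A sketch of how a suspend-to-idle loop might combine them with the newly exported default_idle_call() (enter_s2idle_sketch is a hypothetical caller, not part of this header):

	static void enter_s2idle_sketch(struct cpuidle_driver *drv,
					struct cpuidle_device *dev)
	{
		/* Try the dedicated freeze path; on any failure,
		 * including the -ENODEV stubs above, fall back to the
		 * architecture's default idle routine. */
		if (cpuidle_enter_freeze(drv, dev) < 0)
			default_idle_call();
	}
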
diff --git a/kernel/include/linux/cpuset.h b/kernel/include/linux/cpuset.h
index 1b357997c..fea160ee5 100644
--- a/kernel/include/linux/cpuset.h
+++ b/kernel/include/linux/cpuset.h
@@ -93,7 +93,7 @@ extern int current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
-extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+extern void cpuset_print_current_mems_allowed(void);
/*
* read_mems_allowed_begin is required when making decisions involving
@@ -104,6 +104,9 @@ extern void cpuset_print_task_mems_allowed(struct task_struct *p);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
+ if (!cpusets_enabled())
+ return 0;
+
return read_seqcount_begin(&current->mems_allowed_seq);
}
@@ -115,6 +118,9 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
+ if (!cpusets_enabled())
+ return false;
+
return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
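
read_mems_allowed_begin()/read_mems_allowed_retry() form a seqcount read section, and the new cpusets_enabled() checks make the section free when no cpusets are in use. The canonical usage is a retry loop; a sketch, where alloc_from_allowed_nodes() is a hypothetical stand-in for a real allocator path and gfp/order are assumed from context:

	struct page *page;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		page = alloc_from_allowed_nodes(gfp, order); /* hypothetical */
	} while (!page && read_mems_allowed_retry(seq));
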
@@ -131,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern void cpuset_post_attach_flush(void);
+
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -219,7 +227,7 @@ static inline void rebuild_sched_domains(void)
partition_sched_domains(1, NULL, NULL);
}
-static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
+static inline void cpuset_print_current_mems_allowed(void)
{
}
@@ -237,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/kernel/include/linux/crc-itu-t.h b/kernel/include/linux/crc-itu-t.h
index 84920f3cc..a9953c762 100644
--- a/kernel/include/linux/crc-itu-t.h
+++ b/kernel/include/linux/crc-itu-t.h
@@ -3,7 +3,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
* Init 0
*
* This source code is licensed under the GNU General Public License,
diff --git a/kernel/include/linux/crc-t10dif.h b/kernel/include/linux/crc-t10dif.h
index cf53d0773..d81961e9e 100644
--- a/kernel/include/linux/crc-t10dif.h
+++ b/kernel/include/linux/crc-t10dif.h
@@ -9,5 +9,6 @@
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
extern __u16 crc_t10dif(unsigned char const *, size_t);
+extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
#endif
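
crc_t10dif_update() lets the DIF CRC be folded over data that arrives in pieces; chaining the returned value must match a one-shot crc_t10dif() over the whole buffer. A sketch (buf and len are assumed caller-provided):

	__u16 crc;

	crc = crc_t10dif_update(0, buf, len / 2);	/* seed with 0 */
	crc = crc_t10dif_update(crc, buf + len / 2, len - len / 2);
	/* crc now equals crc_t10dif(buf, len) */
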
diff --git a/kernel/include/linux/cred.h b/kernel/include/linux/cred.h
index 8b6c083e6..8d70e1361 100644
--- a/kernel/include/linux/cred.h
+++ b/kernel/include/linux/cred.h
@@ -137,6 +137,7 @@ struct cred {
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
+ kernel_cap_t cap_ambient; /* Ambient capability set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
@@ -212,6 +213,13 @@ static inline void validate_process_creds(void)
}
#endif
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+{
+ return cap_issubset(cred->cap_ambient,
+ cap_intersect(cred->cap_permitted,
+ cred->cap_inheritable));
+}
+
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
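
cap_ambient_invariant_ok() encodes the rule that the ambient set may never exceed the intersection of the permitted and inheritable sets. A sketch of how a path that edits capability sets might maintain the invariant; new is assumed to be a struct cred * obtained from prepare_creds():

	/* Clamp the ambient set after changing permitted/inheritable. */
	new->cap_ambient = cap_intersect(new->cap_ambient,
					 cap_intersect(new->cap_permitted,
						       new->cap_inheritable));
	BUG_ON(!cap_ambient_invariant_ok(new));
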
diff --git a/kernel/include/linux/crush/crush.h b/kernel/include/linux/crush/crush.h
index 48a1a7d10..48b493057 100644
--- a/kernel/include/linux/crush/crush.h
+++ b/kernel/include/linux/crush/crush.h
@@ -1,7 +1,11 @@
#ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H
-#include <linux/types.h>
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
/*
* CRUSH is a pseudo-random data distribution algorithm that
@@ -20,7 +24,11 @@
#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
+#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */
+#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */
+#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
+#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
#define CRUSH_ITEM_NONE 0x7fffffff /* no result */
@@ -108,6 +116,15 @@ enum {
};
extern const char *crush_bucket_alg_name(int alg);
+/*
+ * tree is a legacy bucket algorithm, but it has been buggy, so
+ * exclude it.
+ */
+#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \
+ (1 << CRUSH_BUCKET_UNIFORM) | \
+ (1 << CRUSH_BUCKET_LIST) | \
+ (1 << CRUSH_BUCKET_STRAW))
+
struct crush_bucket {
__s32 id; /* this'll be negative */
__u16 type; /* non-zero; type=0 is reserved for devices */
@@ -174,7 +191,7 @@ struct crush_map {
/* choose local attempts using a fallback permutation before
* re-descent */
__u32 choose_local_fallback_tries;
- /* choose attempts before giving up */
+ /* choose attempts before giving up */
__u32 choose_total_tries;
/* attempt chooseleaf inner descent once for firstn mode; on
* reject retry outer descent. Note that this does *not*
@@ -187,6 +204,25 @@ struct crush_map {
* that want to limit reshuffling, a value of 3 or 4 will make the
* mappings line up a bit better with previous mappings. */
__u8 chooseleaf_vary_r;
+
+#ifndef __KERNEL__
+ /*
+ * version 0 (original) of straw_calc has various flaws. version 1
+ * fixes a few of them.
+ */
+ __u8 straw_calc_version;
+
+ /*
+ * allowed bucket algs is a bitmask, here the bit positions
+ * are CRUSH_BUCKET_*. note that these are *bits* and
+ * CRUSH_BUCKET_* values are not, so we need to or together (1
+ * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to
+ * minimize confusion (bucket type values start at 1).
+ */
+ __u32 allowed_bucket_algs;
+
+ __u32 *choose_tries;
+#endif
};
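
allowed_bucket_algs (userspace builds only, per the #ifndef above) treats CRUSH_BUCKET_* values as bit positions, so a membership test shifts first; CRUSH_LEGACY_ALLOWED_BUCKET_ALGS above is built the same way. A sketch:

	/* Userspace-only sketch: is bucket algorithm 'alg' permitted? */
	static int bucket_alg_allowed(const struct crush_map *map, int alg)
	{
		return (map->allowed_bucket_algs & (1u << alg)) != 0;
	}
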
diff --git a/kernel/include/linux/crush/hash.h b/kernel/include/linux/crush/hash.h
index 91e884230..d1d902582 100644
--- a/kernel/include/linux/crush/hash.h
+++ b/kernel/include/linux/crush/hash.h
@@ -1,6 +1,12 @@
#ifndef CEPH_CRUSH_HASH_H
#define CEPH_CRUSH_HASH_H
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
+
#define CRUSH_HASH_RJENKINS1 0
#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
diff --git a/kernel/include/linux/crush/mapper.h b/kernel/include/linux/crush/mapper.h
index eab367446..5dfd5b112 100644
--- a/kernel/include/linux/crush/mapper.h
+++ b/kernel/include/linux/crush/mapper.h
@@ -8,7 +8,7 @@
* LGPL2
*/
-#include <linux/crush/crush.h>
+#include "crush.h"
extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
extern int crush_do_rule(const struct crush_map *map,
diff --git a/kernel/include/linux/crypto.h b/kernel/include/linux/crypto.h
index 10df5d2d0..e71cb70a1 100644
--- a/kernel/include/linux/crypto.h
+++ b/kernel/include/linux/crypto.h
@@ -53,6 +53,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
@@ -135,13 +136,10 @@
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
-struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
-struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
-struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -175,32 +173,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct aead_request - AEAD request
- * @base: Common attributes for async crypto requests
- * @assoclen: Length in bytes of associated data for authentication
- * @cryptlen: Length of data to be encrypted or decrypted
- * @iv: Initialisation vector
- * @assoc: Associated data
- * @src: Source data
- * @dst: Destination data
- * @__ctx: Start of private context data
- */
-struct aead_request {
- struct crypto_async_request base;
-
- unsigned int assoclen;
- unsigned int cryptlen;
-
- u8 *iv;
-
- struct scatterlist *assoc;
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
struct blkcipher_desc {
struct crypto_blkcipher *tfm;
void *info;
@@ -294,47 +266,6 @@ struct ablkcipher_alg {
};
/**
- * struct aead_alg - AEAD cipher definition
- * @maxauthsize: Set the maximum authentication tag size supported by the
- * transformation. A transformation may support smaller tag sizes.
- * As the authentication tag is a message digest to ensure the
- * integrity of the encrypted data, a consumer typically wants the
- * largest authentication tag possible as defined by this
- * variable.
- * @setauthsize: Set authentication size for the AEAD transformation. This
- * function is used to specify the consumer requested size of the
- * authentication tag to be either generated by the transformation
- * during encryption or the size of the authentication tag to be
- * supplied during the decryption operation. This function is also
- * responsible for checking the authentication tag size for
- * validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @givencrypt: see struct ablkcipher_alg
- * @givdecrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct aead_alg {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- const char *geniv;
-
- unsigned int ivsize;
- unsigned int maxauthsize;
-};
-
-/**
* struct blkcipher_alg - synchronous block cipher definition
* @min_keysize: see struct ablkcipher_alg
* @max_keysize: see struct ablkcipher_alg
@@ -426,40 +357,11 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-/**
- * struct rng_alg - random number generator definition
- * @rng_make_random: The function defined by this variable obtains a random
- * number. The random number generator transform must generate
- * the random number out of the context provided with this
- * call.
- * @rng_reset: Reset of the random number generator by clearing the entire state.
- * With the invocation of this function call, the random number
- * generator shall completely reinitialize its state. If the random
- * number generator requires a seed for setting up a new state,
- * the seed must be provided by the consumer while invoking this
- * function. The required size of the seed is defined with
- * @seedsize .
- * @seedsize: The seed size required for a random number generator
- * initialization defined with this variable. Some random number
- * generators like the SP800-90A DRBG does not require a seed as the
- * seeding is implemented internally without the need of support by
- * the consumer. In this case, the seed size is set to zero.
- */
-struct rng_alg {
- int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-
- unsigned int seedsize;
-};
-
#define cra_ablkcipher cra_u.ablkcipher
-#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
-#define cra_rng cra_u.rng
/**
* struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -505,9 +407,9 @@ struct rng_alg {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * trasnformation types. There are multiple options:
+ * transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -555,11 +457,9 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
- struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -567,7 +467,7 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-};
+} CRYPTO_MINALIGN_ATTR;
/*
* Algorithm registration interface.
@@ -602,21 +502,6 @@ struct ablkcipher_tfm {
unsigned int reqsize;
};
-struct aead_tfm {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- struct crypto_aead *base;
-
- unsigned int ivsize;
- unsigned int authsize;
- unsigned int reqsize;
-};
-
struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -655,19 +540,11 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
-struct rng_tfm {
- int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
-
#define crt_ablkcipher crt_u.ablkcipher
-#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
-#define crt_rng crt_u.rng
struct crypto_tfm {
@@ -675,12 +552,10 @@ struct crypto_tfm {
union {
struct ablkcipher_tfm ablkcipher;
- struct aead_tfm aead;
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
struct compress_tfm compress;
- struct rng_tfm rng;
} crt_u;
void (*exit)(struct crypto_tfm *tfm);
@@ -694,10 +569,6 @@ struct crypto_ablkcipher {
struct crypto_tfm base;
};
-struct crypto_aead {
- struct crypto_tfm base;
-};
-
struct crypto_blkcipher {
struct crypto_tfm base;
};
@@ -714,10 +585,6 @@ struct crypto_hash {
struct crypto_tfm base;
};
-struct crypto_rng {
- struct crypto_tfm base;
-};
-
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -1194,400 +1061,6 @@ static inline void ablkcipher_request_set_crypt(
}
/**
- * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
- *
- * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
- * (listed as type "aead" in /proc/crypto)
- *
- * The most prominent examples for this type of encryption is GCM and CCM.
- * However, the kernel supports other types of AEAD ciphers which are defined
- * with the following cipher string:
- *
- * authenc(keyed message digest, block cipher)
- *
- * For example: authenc(hmac(sha256), cbc(aes))
- *
- * The example code provided for the asynchronous block cipher operation
- * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addtion, for the AEAD
- * operation, the aead_request_set_assoc function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
- * operation is that the caller should explicitly check for -EBADMSG of the
- * crypto_aead_decrypt. That error indicates an authentication error, i.e.
- * a breach in the integrity of the message. In essence, that -EBADMSG error
- * code is the key bonus an AEAD cipher has over "standard" block chaining
- * modes.
- */
-
-static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_aead *)tfm;
-}
-
-/**
- * crypto_alloc_aead() - allocate AEAD cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * AEAD cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an AEAD. The returned struct
- * crypto_aead is the cipher handle that is required for any subsequent
- * API invocation for that AEAD.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
-
-static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_aead() - zeroize and free aead handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_aead(struct crypto_aead *tfm)
-{
- crypto_free_tfm(crypto_aead_tfm(tfm));
-}
-
-static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->crt_aead;
-}
-
-/**
- * crypto_aead_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the aead referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_aead_authsize() - obtain maximum authentication data size
- * @tfm: cipher handle
- *
- * The maximum size of the authentication data for the AEAD cipher referenced
- * by the AEAD cipher handle is returned. The authentication data size may be
- * zero if the cipher implements a hard-coded maximum.
- *
- * The authentication data may also be known as "tag value".
- *
- * Return: authentication data size / tag size in bytes
- */
-static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->authsize;
-}
-
-/**
- * crypto_aead_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the AEAD referenced with the cipher handle is returned.
- * The caller may use that information to allocate appropriate memory for the
- * data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
-}
-
-static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
-}
-
-static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
-{
- return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
-}
-
-static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
-}
-
-static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
-}
-
-/**
- * crypto_aead_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the AEAD referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct aead_tfm *crt = crypto_aead_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_aead_setauthsize() - set authentication data size
- * @tfm: cipher handle
- * @authsize: size of the authentication data / tag in bytes
- *
- * Set the authentication data size / tag size. AEAD requires an authentication
- * tag (or MAC) in addition to the associated data.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
-
-static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
-{
- return __crypto_aead_cast(req->base.tfm);
-}
-
-/**
- * crypto_aead_encrypt() - encrypt plaintext
- * @req: reference to the aead_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The encryption operation creates the authentication data /
- * tag. That data is concatenated with the created ciphertext.
- * The ciphertext memory size is therefore the given number of
- * block cipher blocks + the size defined by the
- * crypto_aead_setauthsize invocation. The caller must ensure
- * that sufficient memory is available for the ciphertext and
- * the authentication tag.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
- return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
-}
-
-/**
- * crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
- * authentication data / tag. That authentication data / tag
- * must have the size defined by the crypto_aead_setauthsize
- * invocation.
- *
- *
- * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
- * cipher operation performs the authentication of the data during the
- * decryption operation. Therefore, the function returns this error if
- * the authentication of the ciphertext was unsuccessful (i.e. the
- * integrity of the ciphertext or the associated data was violated);
- * < 0 if an error occurred.
- */
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
- if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
- return -EINVAL;
-
- return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous AEAD Request Handle
- *
- * The aead_request data structure contains all pointers to data required for
- * the AEAD cipher operation. This includes the cipher handle (which can be
- * used by multiple aead_request instances), pointer to plaintext and
- * ciphertext, asynchronous callback function, etc. It acts as a handle to the
- * aead_request_* API calls in a similar way as AEAD handle to the
- * crypto_aead_* API calls.
- */
-
-/**
- * crypto_aead_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->reqsize;
-}
-
-/**
- * aead_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing aead handle in the request
- * data structure with a different one.
- */
-static inline void aead_request_set_tfm(struct aead_request *req,
- struct crypto_aead *tfm)
-{
- req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
-}
-
-/**
- * aead_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the AEAD
- * encrypt and decrypt API calls. During the allocation, the provided aead
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
- gfp_t gfp)
-{
- struct aead_request *req;
-
- req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
-
- if (likely(req))
- aead_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * aead_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void aead_request_free(struct aead_request *req)
-{
- kzfree(req);
-}
-
-/**
- * aead_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * Setting the callback function that is triggered once the cipher operation
- * completes
- *
- * The callback function is registered with the aead_request handle and
- * must comply with the following template
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void aead_request_set_callback(struct aead_request *req,
- u32 flags,
- crypto_completion_t compl,
- void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * aead_request_set_crypt - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @cryptlen: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_aead_ivsize()
- *
- * Setting the source data and destination data scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- * the caller must concatenate the ciphertext followed by the
- * authentication tag and provide the entire data stream to the
- * decryption operation (i.e. the data length used for the
- * initialization of the scatterlist and the data length for the
- * decryption operation is identical). For encryption, however,
- * the authentication tag is created while encrypting the data.
- * The destination buffer must hold sufficient space for the
- * ciphertext and the authentication tag while the encryption
- * invocation must only point to the plaintext data size. The
- * following code snippet illustrates the memory usage
- * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
- */
-static inline void aead_request_set_crypt(struct aead_request *req,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int cryptlen, u8 *iv)
-{
- req->src = src;
- req->dst = dst;
- req->cryptlen = cryptlen;
- req->iv = iv;
-}
-
-/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * For encryption, the memory is filled with the associated data. For
- * decryption, the memory must point to the associated data.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- req->assoc = assoc;
- req->assoclen = assoclen;
-}
-
-/**
* DOC: Synchronous Block Cipher API
*
* The synchronous block cipher API is used with the ciphers of type
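
Everything AEAD-related is dropped from this header; in the same kernel series the API moves to a dedicated header (include/crypto/aead.h upstream), where associated data sits inline at the front of src/dst and its length is set with aead_request_set_ad() rather than a separate aead_request_set_assoc() scatterlist. A hedged sketch of the replacement call sequence (key, keylen, iv, sg, assoclen and cryptlen are assumed caller-provided; error handling omitted):

	#include <crypto/aead.h>

	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

	crypto_aead_setkey(tfm, key, keylen);
	crypto_aead_setauthsize(tfm, 16);
	aead_request_set_ad(req, assoclen);	/* AD is at the head of sg */
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
	int err = crypto_aead_encrypt(req);
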
diff --git a/kernel/include/linux/cryptouser.h b/kernel/include/linux/cryptouser.h
deleted file mode 100644
index 4abf2ea6a..000000000
--- a/kernel/include/linux/cryptouser.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Crypto user configuration API.
- *
- * Copyright (C) 2011 secunet Security Networks AG
- * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/* Netlink configuration messages. */
-enum {
- CRYPTO_MSG_BASE = 0x10,
- CRYPTO_MSG_NEWALG = 0x10,
- CRYPTO_MSG_DELALG,
- CRYPTO_MSG_UPDATEALG,
- CRYPTO_MSG_GETALG,
- __CRYPTO_MSG_MAX
-};
-#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
-#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
-
-#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
-
-/* Netlink message attributes. */
-enum crypto_attr_type_t {
- CRYPTOCFGA_UNSPEC,
- CRYPTOCFGA_PRIORITY_VAL, /* __u32 */
- CRYPTOCFGA_REPORT_LARVAL, /* struct crypto_report_larval */
- CRYPTOCFGA_REPORT_HASH, /* struct crypto_report_hash */
- CRYPTOCFGA_REPORT_BLKCIPHER, /* struct crypto_report_blkcipher */
- CRYPTOCFGA_REPORT_AEAD, /* struct crypto_report_aead */
- CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
- CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
- CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
- __CRYPTOCFGA_MAX
-
-#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
-};
-
-struct crypto_user_alg {
- char cru_name[CRYPTO_MAX_ALG_NAME];
- char cru_driver_name[CRYPTO_MAX_ALG_NAME];
- char cru_module_name[CRYPTO_MAX_ALG_NAME];
- __u32 cru_type;
- __u32 cru_mask;
- __u32 cru_refcnt;
- __u32 cru_flags;
-};
-
-struct crypto_report_larval {
- char type[CRYPTO_MAX_NAME];
-};
-
-struct crypto_report_hash {
- char type[CRYPTO_MAX_NAME];
- unsigned int blocksize;
- unsigned int digestsize;
-};
-
-struct crypto_report_cipher {
- char type[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- unsigned int min_keysize;
- unsigned int max_keysize;
-};
-
-struct crypto_report_blkcipher {
- char type[CRYPTO_MAX_NAME];
- char geniv[CRYPTO_MAX_NAME];
- unsigned int blocksize;
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-struct crypto_report_aead {
- char type[CRYPTO_MAX_NAME];
- char geniv[CRYPTO_MAX_NAME];
- unsigned int blocksize;
- unsigned int maxauthsize;
- unsigned int ivsize;
-};
-
-struct crypto_report_comp {
- char type[CRYPTO_MAX_NAME];
-};
-
-struct crypto_report_rng {
- char type[CRYPTO_MAX_NAME];
- unsigned int seedsize;
-};
-
-#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
- sizeof(struct crypto_report_blkcipher))
diff --git a/kernel/include/linux/dax.h b/kernel/include/linux/dax.h
new file mode 100644
index 000000000..b415e5215
--- /dev/null
+++ b/kernel/include/linux/dax.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_DAX_H
+#define _LINUX_DAX_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned int flags, get_block_t, dax_iodone_t);
+int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned int flags, get_block_t, dax_iodone_t);
+#else
+static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, unsigned int flags, get_block_t gb,
+ dax_iodone_t di)
+{
+ return VM_FAULT_FALLBACK;
+}
+#define __dax_pmd_fault dax_pmd_fault
+#endif
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
+#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
+
+static inline bool vma_is_dax(struct vm_area_struct *vma)
+{
+ return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+}
+#endif
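
A filesystem opts into DAX by routing its fault handlers through dax_fault()/dax_pmd_fault(). A sketch of a fault callback, where my_get_block is a hypothetical get_block_t and NULL skips the I/O-done callback:

	static int my_fs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		if (vma_is_dax(vma))
			return dax_fault(vma, vmf, my_get_block, NULL);
		return filemap_fault(vma, vmf);	/* regular page-cache path */
	}
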
diff --git a/kernel/include/linux/dcache.h b/kernel/include/linux/dcache.h
index df334cbac..8a2e009c8 100644
--- a/kernel/include/linux/dcache.h
+++ b/kernel/include/linux/dcache.h
@@ -160,6 +160,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
+ struct inode *(*d_select_inode)(struct dentry *, unsigned);
} ____cacheline_aligned;
/*
@@ -225,6 +226,7 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
+#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
extern seqlock_t rename_lock;
@@ -325,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry)
/*
* helper function for dentry_operations.d_dname() members
*/
-extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(4, 5)
+char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
@@ -406,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
*/
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- unsigned type = READ_ONCE(dentry->d_flags);
- smp_rmb();
- return type & DCACHE_ENTRY_TYPE;
+ return dentry->d_flags & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
@@ -505,6 +506,11 @@ static inline bool d_really_is_positive(const struct dentry *dentry)
return dentry->d_inode != NULL;
}
+static inline int simple_positive(struct dentry *dentry)
+{
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
+}
+
extern void d_set_fallthru(struct dentry *dentry);
static inline bool d_is_fallthru(const struct dentry *dentry)
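
simple_positive() packages the usual libfs liveness test: the dentry is positive and still hashed. A sketch of the common pattern, counting live children of a pseudo-filesystem directory (parent is assumed; locking reduced to the essentials):

	struct dentry *child;
	int live = 0;

	spin_lock(&parent->d_lock);
	list_for_each_entry(child, &parent->d_subdirs, d_child) {
		if (simple_positive(child))
			live++;
	}
	spin_unlock(&parent->d_lock);
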
diff --git a/kernel/include/linux/dccp.h b/kernel/include/linux/dccp.h
index 221025423..61d042bbb 100644
--- a/kernel/include/linux/dccp.h
+++ b/kernel/include/linux/dccp.h
@@ -202,16 +202,16 @@ struct dccp_service_list {
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
#define DCCP_SERVICE_CODE_IS_ABSENT 0
-static inline int dccp_list_has_service(const struct dccp_service_list *sl,
+static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
const __be32 service)
{
if (likely(sl != NULL)) {
u32 i = sl->dccpsl_nr;
while (i--)
if (sl->dccpsl_list[i] == service)
- return 1;
+ return true;
}
- return 0;
+ return false;
}
struct dccp_ackvec;
diff --git a/kernel/include/linux/debugfs.h b/kernel/include/linux/debugfs.h
index cb25af461..19c066dce 100644
--- a/kernel/include/linux/debugfs.h
+++ b/kernel/include/linux/debugfs.h
@@ -45,7 +45,6 @@ extern struct dentry *arch_debugfs_dir;
/* declared over in file.c */
extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
@@ -80,6 +79,8 @@ struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
struct dentry *debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value);
+struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value);
struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value);
struct dentry *debugfs_create_x16(const char *name, umode_t mode,
@@ -93,7 +94,7 @@ struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value);
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
+ struct dentry *parent, bool *value);
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -117,6 +118,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
bool debugfs_initialized(void);
+ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
#else
#include <linux/err.h>
@@ -238,7 +245,7 @@ static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t m
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
- u32 *value)
+ bool *value)
{
return ERR_PTR(-ENODEV);
}
@@ -283,6 +290,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
return ERR_PTR(-ENODEV);
}
+static inline ssize_t debugfs_read_file_bool(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t debugfs_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
#endif
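
debugfs_create_bool() now takes a bool * (it previously exposed a u32), and the new debugfs_read_file_bool()/debugfs_write_file_bool() helpers let drivers with custom fops reuse the Y/N parsing. The common case stays a one-liner; a sketch, where my_feature_enabled and my_dir are hypothetical:

	static bool my_feature_enabled;

	/* Creates <debugfs>/.../enabled, backed directly by the bool. */
	debugfs_create_bool("enabled", 0644, my_dir, &my_feature_enabled);
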
diff --git a/kernel/include/linux/devfreq.h b/kernel/include/linux/devfreq.h
index ce447f0f1..68030e22a 100644
--- a/kernel/include/linux/devfreq.h
+++ b/kernel/include/linux/devfreq.h
@@ -65,7 +65,10 @@ struct devfreq_dev_status {
* The "flags" parameter's possible values are
* explained above with "DEVFREQ_FLAG_*" macros.
* @get_dev_status: The device should provide the current performance
- * status to devfreq, which is used by governors.
+ * status to devfreq. Governors should not
+ * use this directly; instead, they should use
+ * devfreq_update_stats() along with
+ * devfreq.last_status.
* @get_cur_freq: The device should provide the current frequency
* at which it is operating.
* @exit: An optional callback that is called when devfreq
@@ -161,6 +164,7 @@ struct devfreq {
struct delayed_work work;
unsigned long previous_freq;
+ struct devfreq_dev_status last_status;
void *data; /* private data for governors */
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
+/**
+ * devfreq_update_stats() - update the last_status pointer in struct devfreq
+ * @df: the devfreq instance whose status needs updating
+ *
+ * Governors should use this function to refresh last_status; other
+ * entities can then reuse last_status without affecting the values
+ * fetched later by governors.
+ */
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq)
{
}
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_PM_DEVFREQ */
#endif /* __LINUX_DEVFREQ_H__ */
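
Taken together, last_status and devfreq_update_stats() let a governor refresh the device's load figures once per decision and leave the result visible to other users. A sketch of a get_target_freq-style governor callback (the surrounding governor scaffolding is assumed):

	static int my_get_target_freq(struct devfreq *df, unsigned long *freq)
	{
		struct devfreq_dev_status *stat;
		int err = devfreq_update_stats(df);

		if (err)
			return err;

		stat = &df->last_status;
		/* crude proportional policy: scale by the busy fraction */
		if (stat->total_time)
			*freq = stat->current_frequency * stat->busy_time /
				stat->total_time;
		return 0;
	}
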
diff --git a/kernel/include/linux/devfreq_cooling.h b/kernel/include/linux/devfreq_cooling.h
new file mode 100644
index 000000000..7adf6cc4b
--- /dev/null
+++ b/kernel/include/linux/devfreq_cooling.h
@@ -0,0 +1,81 @@
+/*
+ * devfreq_cooling: Thermal cooling device implementation for devices using
+ * devfreq
+ *
+ * Copyright (C) 2014-2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DEVFREQ_COOLING_H__
+#define __DEVFREQ_COOLING_H__
+
+#include <linux/devfreq.h>
+#include <linux/thermal.h>
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+
+/**
+ * struct devfreq_cooling_power - Devfreq cooling power ops
+ * @get_static_power: Take voltage, in mV, and return the static power
+ * in mW. If NULL, the static power is assumed
+ * to be 0.
+ * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and
+ * return the dynamic power draw in mW. If NULL,
+ * a simple power model is used.
+ * @dyn_power_coeff: Coefficient for the simple dynamic power model in
+ * mW/(MHz mV mV).
+ * If get_dynamic_power() is NULL, then the
+ * dynamic power is calculated as
+ * @dyn_power_coeff * frequency * voltage^2
+ */
+struct devfreq_cooling_power {
+ unsigned long (*get_static_power)(unsigned long voltage);
+ unsigned long (*get_dynamic_power)(unsigned long freq,
+ unsigned long voltage);
+ unsigned long dyn_power_coeff;
+};
+
+struct thermal_cooling_device *
+of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power);
+struct thermal_cooling_device *
+of_devfreq_cooling_register(struct device_node *np, struct devfreq *df);
+struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df);
+void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
+
+#else /* !CONFIG_DEVFREQ_THERMAL */
+
+static inline struct thermal_cooling_device *
+of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct thermal_cooling_device *
+of_devfreq_cooling_register(struct device_node *np, struct devfreq *df)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct thermal_cooling_device *
+devfreq_cooling_register(struct devfreq *df)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void
+devfreq_cooling_unregister(struct thermal_cooling_device *dfc)
+{
+}
+
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* __DEVFREQ_COOLING_H__ */
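
Registering the cooling device is a thin wrapper around these ops. A sketch for a driver supplying only the simple dynamic-power model (the coefficient value is illustrative, not taken from any real device):

	static struct devfreq_cooling_power my_cooling_power = {
		/* no callbacks: static power 0, simple dynamic model */
		.dyn_power_coeff = 120,	/* mW/(MHz mV mV), made-up value */
	};

	struct thermal_cooling_device *cdev =
		of_devfreq_cooling_register_power(dev->of_node, df,
						  &my_cooling_power);
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);
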
diff --git a/kernel/include/linux/device-mapper.h b/kernel/include/linux/device-mapper.h
index 51cc1deb7..ec1c61c87 100644
--- a/kernel/include/linux/device-mapper.h
+++ b/kernel/include/linux/device-mapper.h
@@ -79,11 +79,8 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
-typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
- unsigned long arg);
-
-typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size);
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode);
/*
* These iteration functions are typically used to check (and combine)
@@ -159,8 +156,7 @@ struct target_type {
dm_resume_fn resume;
dm_status_fn status;
dm_message_fn message;
- dm_ioctl_fn ioctl;
- dm_merge_fn merge;
+ dm_prepare_ioctl_fn prepare_ioctl;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
diff --git a/kernel/include/linux/device.h b/kernel/include/linux/device.h
index 6558af90c..b8f411b57 100644
--- a/kernel/include/linux/device.h
+++ b/kernel/include/linux/device.h
@@ -196,12 +196,41 @@ extern struct kset *bus_get_kset(struct bus_type *bus);
extern struct klist *bus_get_device_klist(struct bus_type *bus);
/**
+ * enum probe_type - device driver probe type to try
+ * Device drivers may opt in for special handling of their
+ * respective probe routines. This tells the core what to
+ * expect and prefer.
+ *
+ * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
+ * whether probed synchronously or asynchronously.
+ * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
+ * probing order is not essential for booting the system may
+ * opt into executing their probes asynchronously.
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
+ * their probe routines to run synchronously with driver and
+ * device registration (with the exception of -EPROBE_DEFER
+ * handling - re-probing always ends up being done asynchronously).
+ *
+ * Note that the end goal is to switch the kernel to use asynchronous
+ * probing by default, so annotating drivers with
+ * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
+ * to speed up boot process while we are validating the rest of the
+ * drivers.
+ */
+enum probe_type {
+ PROBE_DEFAULT_STRATEGY,
+ PROBE_PREFER_ASYNCHRONOUS,
+ PROBE_FORCE_SYNCHRONOUS,
+};
+
+/**
* struct device_driver - The basic device driver structure
* @name: Name of the device driver.
* @bus: The bus which the device of this driver belongs to.
* @owner: The module owner.
* @mod_name: Used for built-in modules.
* @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @probe_type: Type of the probe (synchronous or asynchronous) to use.
* @of_match_table: The open firmware table.
* @acpi_match_table: The ACPI match table.
* @probe: Called to query the existence of a specific device,
@@ -235,6 +264,7 @@ struct device_driver {
const char *mod_name; /* used for built-in modules */
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+ enum probe_type probe_type;
const struct of_device_id *of_match_table;
const struct acpi_device_id *acpi_match_table;
@@ -311,7 +341,7 @@ struct subsys_interface {
struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
- int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+ void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};
int subsys_interface_register(struct subsys_interface *sif);
@@ -574,13 +604,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
#ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
- const char *name);
+extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid, const char *name);
#define devres_alloc(release, size, gfp) \
- __devres_alloc(release, size, gfp, #release)
+ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+ __devres_alloc_node(release, size, gfp, nid, #release)
#else
-extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid);
+static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+{
+ return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
+}
#endif
+
extern void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
void (*fn)(struct device *, void *, void *),
@@ -607,8 +645,9 @@ extern int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
-extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap);
+extern __printf(3, 0)
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+ va_list ap);
extern __printf(3, 4)
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -683,6 +722,8 @@ struct device_dma_parameters {
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
+ * @msi_list: Hosts MSI descriptors
+ * @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -743,9 +784,15 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *msi_domain;
+#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct list_head msi_list;
+#endif
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
@@ -830,6 +877,22 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ return dev->msi_domain;
+#else
+ return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ dev->msi_domain = d;
+#endif
+}
+
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
@@ -928,6 +991,8 @@ extern int __must_check device_add(struct device *dev);
extern void device_del(struct device *dev);
extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
+extern int device_for_each_child_reverse(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, const char *new_name);
@@ -975,17 +1040,16 @@ extern int __must_check device_bind_driver(struct device *dev);
extern void device_release_driver(struct device *dev);
extern int __must_check device_attach(struct device *dev);
extern int __must_check driver_attach(struct device_driver *drv);
+extern void device_initial_probe(struct device *dev);
extern int __must_check device_reprobe(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern struct device *device_create_vargs(struct class *cls,
- struct device *parent,
- dev_t devt,
- void *drvdata,
- const char *fmt,
- va_list vargs);
+extern __printf(5, 0)
+struct device *device_create_vargs(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
+ const char *fmt, va_list vargs);
extern __printf(5, 6)
struct device *device_create(struct class *cls, struct device *parent,
dev_t devt, void *drvdata,
@@ -1269,4 +1333,26 @@ static void __exit __driver##_exit(void) \
} \
module_exit(__driver##_exit);
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall). This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
#endif /* _DEVICE_H_ */
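
builtin_driver() collapses the init-only boilerplate, and bus code typically wraps it once more. A sketch of both layers; builtin_platform_driver mirrors the upstream wrapper of the same name, and my_platform_driver is hypothetical:

	/* Bus-level wrapper built on the macro above. */
	#define builtin_platform_driver(__platform_driver) \
		builtin_driver(__platform_driver, platform_driver_register)

	/* Driver usage: registers at device_initcall time, no exit path. */
	builtin_platform_driver(my_platform_driver);
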
diff --git a/kernel/include/linux/devpts_fs.h b/kernel/include/linux/devpts_fs.h
index 251a2090a..e0ee0b300 100644
--- a/kernel/include/linux/devpts_fs.h
+++ b/kernel/include/linux/devpts_fs.h
@@ -19,6 +19,8 @@
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{
diff --git a/kernel/include/linux/dma-buf.h b/kernel/include/linux/dma-buf.h
index 2f0b431b7..f98bd7068 100644
--- a/kernel/include/linux/dma-buf.h
+++ b/kernel/include/linux/dma-buf.h
@@ -115,6 +115,8 @@ struct dma_buf_ops {
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @exp_name: name of the exporter; useful for debugging.
+ * @owner: pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
@@ -129,6 +131,7 @@ struct dma_buf {
unsigned vmapping_counter;
void *vmap_ptr;
const char *exp_name;
+ struct module *owner;
struct list_head list_node;
void *priv;
struct reservation_object *resv;
@@ -164,7 +167,8 @@ struct dma_buf_attachment {
/**
* struct dma_buf_export_info - holds information needed to export a dma_buf
- * @exp_name: name of the exporting module - useful for debugging.
+ * @exp_name: name of the exporter - useful for debugging.
+ * @owner: pointer to exporter module - used for refcounting kernel module
* @ops: Attach allocator-defined dma buf ops to the new buffer
* @size: Size of the buffer
* @flags: mode flags for the file
@@ -176,6 +180,7 @@ struct dma_buf_attachment {
*/
struct dma_buf_export_info {
const char *exp_name;
+ struct module *owner;
const struct dma_buf_ops *ops;
size_t size;
int flags;
@@ -187,7 +192,8 @@ struct dma_buf_export_info {
* helper macro for exporters; zeros and fills in most common values
*/
#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
- struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME }
+ struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+ .owner = THIS_MODULE }
/**
* get_dma_buf - convenience wrapper for get_file.
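
A sketch of an exporter picking up the new .owner field via the updated helper macro, which now fills in both exp_name and THIS_MODULE (my_dma_buf_ops and the buffer type are placeholders):

	static struct dma_buf *my_export(struct my_buffer *buf)
	{
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* sets exp_name and owner */

		exp_info.ops   = &my_dma_buf_ops;
		exp_info.size  = buf->size;
		exp_info.flags = O_RDWR;
		exp_info.priv  = buf;

		return dma_buf_export(&exp_info);
	}

With .owner set, the exporter module is pinned for as long as the dma_buf file stays alive, so it cannot be unloaded out from under its buffers.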
diff --git a/kernel/include/linux/dma-contiguous.h b/kernel/include/linux/dma-contiguous.h
index 569bbd039..fec734df1 100644
--- a/kernel/include/linux/dma-contiguous.h
+++ b/kernel/include/linux/dma-contiguous.h
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
return ret;
}
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int order);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int order)
{
return NULL;
diff --git a/kernel/include/linux/dma-iommu.h b/kernel/include/linux/dma-iommu.h
new file mode 100644
index 000000000..fc4810374
--- /dev/null
+++ b/kernel/include/linux/dma-iommu.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014-2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_IOMMU_H
+#define __DMA_IOMMU_H
+
+#ifdef __KERNEL__
+#include <asm/errno.h>
+
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/iommu.h>
+
+int iommu_dma_init(void);
+
+/* Domain management interface for IOMMU drivers */
+int iommu_get_dma_cookie(struct iommu_domain *domain);
+void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+/* Setup call for arch DMA mapping code */
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+
+/* General helpers for DMA-API <-> IOMMU-API interaction */
+int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+
+/*
+ * These implement the bulk of the relevant DMA mapping callbacks, but require
+ * the arch code to take care of attributes and cache maintenance
+ */
+struct page **iommu_dma_alloc(struct device *dev, size_t size,
+ gfp_t gfp, int prot, dma_addr_t *handle,
+ void (*flush_page)(struct device *, const void *, phys_addr_t));
+void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
+ dma_addr_t *handle);
+
+int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, int prot);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int prot);
+
+/*
+ * Arch code with no special attribute handling may use these
+ * directly as DMA mapping callbacks for simplicity
+ */
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+int iommu_dma_supported(struct device *dev, u64 mask);
+int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#else
+
+struct iommu_domain;
+
+static inline int iommu_dma_init(void)
+{
+ return 0;
+}
+
+static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+#endif /* __KERNEL__ */
+#endif /* __DMA_IOMMU_H */
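
In rough outline, the intended pairing is: the IOMMU driver attaches a cookie when it creates a DMA-API domain, the arch code sizes the IOVA window, and the cookie is dropped at teardown. A hedged sketch with error handling trimmed:

	/* IOMMU driver: when allocating a DMA-API domain */
	if (iommu_get_dma_cookie(domain))
		goto err_free_domain;

	/* arch DMA code: set the usable IOVA window before the first map */
	iommu_dma_init_domain(domain, base, size);

	/* teardown */
	iommu_put_dma_cookie(domain);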
diff --git a/kernel/include/linux/dma-mapping.h b/kernel/include/linux/dma-mapping.h
index ac07ff090..2e551e2d2 100644
--- a/kernel/include/linux/dma-mapping.h
+++ b/kernel/include/linux/dma-mapping.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
+#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -145,7 +146,9 @@ static inline void arch_teardown_dma_ops(struct device *dev) { }
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
- return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+ if (dev->dma_parms && dev->dma_parms->max_segment_size)
+ return dev->dma_parms->max_segment_size;
+ return SZ_64K;
}
static inline unsigned int dma_set_max_seg_size(struct device *dev,
@@ -154,14 +157,15 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev,
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
return 0;
- } else
- return -EIO;
+ }
+ return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
- return dev->dma_parms ?
- dev->dma_parms->segment_boundary_mask : 0xffffffff;
+ if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
+ return dev->dma_parms->segment_boundary_mask;
+ return DMA_BIT_MASK(32);
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -169,8 +173,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
if (dev->dma_parms) {
dev->dma_parms->segment_boundary_mask = mask;
return 0;
- } else
- return -EIO;
+ }
+ return -EIO;
}
#ifndef dma_max_pfn
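
With the fallbacks above, a zero-initialized dma_parms no longer leaks a 0 limit to callers; unset fields now read as SZ_64K and DMA_BIT_MASK(32). A short sketch of a driver querying and setting the limits:

	unsigned int max_seg = dma_get_max_seg_size(dev);	/* SZ_64K if unset */
	unsigned long bmask  = dma_get_seg_boundary(dev);	/* 4 GiB mask if unset */

	if (dma_set_max_seg_size(dev, SZ_1M))
		dev_warn(dev, "no dma_parms, keeping default segment size\n");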
diff --git a/kernel/include/linux/dma/hsu.h b/kernel/include/linux/dma/hsu.h
index 234393a69..79df69dc6 100644
--- a/kernel/include/linux/dma/hsu.h
+++ b/kernel/include/linux/dma/hsu.h
@@ -35,14 +35,23 @@ struct hsu_dma_chip {
unsigned int length;
unsigned int offset;
struct hsu_dma *hsu;
- struct hsu_dma_platform_data *pdata;
};
+#if IS_ENABLED(CONFIG_HSU_DMA)
/* Export to the internal users */
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
int hsu_dma_remove(struct hsu_dma_chip *chip);
+#else
+static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip,
+ unsigned short nr)
+{
+ return IRQ_NONE;
+}
+static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; }
+static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; }
+#endif /* CONFIG_HSU_DMA */
#endif /* _DMA_HSU_H */
diff --git a/kernel/include/linux/dma/pxa-dma.h b/kernel/include/linux/dma/pxa-dma.h
new file mode 100644
index 000000000..3edc99294
--- /dev/null
+++ b/kernel/include/linux/dma/pxa-dma.h
@@ -0,0 +1,27 @@
+#ifndef _PXA_DMA_H_
+#define _PXA_DMA_H_
+
+enum pxad_chan_prio {
+ PXAD_PRIO_HIGHEST = 0,
+ PXAD_PRIO_NORMAL,
+ PXAD_PRIO_LOW,
+ PXAD_PRIO_LOWEST,
+};
+
+struct pxad_param {
+ unsigned int drcmr;
+ enum pxad_chan_prio prio;
+};
+
+struct dma_chan;
+
+#ifdef CONFIG_PXA_DMA
+bool pxad_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+#endif
+
+#endif /* _PXA_DMA_H_ */
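
A typical consumer passes a pxad_param through the filter when requesting a channel; the request line number below is a placeholder:

	static struct pxad_param param = {
		.drcmr = 24,			/* illustrative DRCMR line */
		.prio  = PXAD_PRIO_NORMAL,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						&param, dev, "rx");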
diff --git a/kernel/include/linux/dma_remapping.h b/kernel/include/linux/dma_remapping.h
index 7ac17f572..187c10299 100644
--- a/kernel/include/linux/dma_remapping.h
+++ b/kernel/include/linux/dma_remapping.h
@@ -20,6 +20,14 @@
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
+/* Extended context entry types */
+#define CONTEXT_TT_PT_PASID 4
+#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
+#define CONTEXT_TT_MASK (7ULL << 2)
+
+#define CONTEXT_DINVE (1ULL << 8)
+#define CONTEXT_PRS (1ULL << 9)
+#define CONTEXT_PASIDE (1ULL << 11)
struct intel_iommu;
struct dmar_domain;
diff --git a/kernel/include/linux/dmaengine.h b/kernel/include/linux/dmaengine.h
index ad4197572..c47c68e53 100644
--- a/kernel/include/linux/dmaengine.h
+++ b/kernel/include/linux/dmaengine.h
@@ -65,6 +65,8 @@ enum dma_transaction_type {
DMA_PQ,
DMA_XOR_VAL,
DMA_PQ_VAL,
+ DMA_MEMSET,
+ DMA_MEMSET_SG,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
@@ -122,10 +124,18 @@ enum dma_transfer_direction {
* chunk and before first src/dst address for next chunk.
* Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
* Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ * chunk and before the first dst address for next chunk.
+ * Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ * chunk and before the first src address for next chunk.
+ * Ignored if src_inc is true and src_sgl is false.
*/
struct data_chunk {
size_t size;
size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
};
/**
@@ -174,6 +184,8 @@ struct dma_interleaved_template {
* operation it continues the calculation with new sources
* @DMA_PREP_FENCE - tell the driver that subsequent operations depend
* on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ * cleared or freed
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -182,6 +194,7 @@ enum dma_ctrl_flags {
DMA_PREP_PQ_DISABLE_Q = (1 << 3),
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
+ DMA_CTRL_REUSE = (1 << 6),
};
/**
@@ -222,6 +235,16 @@ struct dma_chan_percpu {
};
/**
+ * struct dma_router - DMA router structure
+ * @dev: pointer to the DMA router device
+ * @route_free: function to be called when the route can be disconnected
+ */
+struct dma_router {
+ struct device *dev;
+ void (*route_free)(struct device *dev, void *route_data);
+};
+
+/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
@@ -232,6 +255,8 @@ struct dma_chan_percpu {
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
* @table_count: number of appearances in the mem-to-mem allocation table
+ * @router: pointer to the DMA router structure
+ * @route_data: channel specific data for the router
* @private: private data for certain client-channel associations
*/
struct dma_chan {
@@ -247,6 +272,11 @@ struct dma_chan {
struct dma_chan_percpu __percpu *local;
int client_count;
int table_count;
+
+ /* DMA router */
+ struct dma_router *router;
+ void *route_data;
+
void *private;
};
@@ -374,6 +404,8 @@ enum dma_residue_granularity {
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ * resubmitted multiple times
*/
struct dma_slave_caps {
u32 src_addr_widths;
@@ -382,6 +414,7 @@ struct dma_slave_caps {
bool cmd_pause;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
+ bool descriptor_reuse;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -441,6 +474,7 @@ struct dma_async_tx_descriptor {
dma_addr_t phys;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_unmap_data *unmap;
@@ -559,6 +593,20 @@ struct dma_tx_state {
};
/**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -570,6 +618,7 @@ struct dma_tx_state {
* @copy_align: alignment shift for memcpy operations
* @xor_align: alignment shift for xor operations
* @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @src_addr_widths: bit mask of src addr widths the device supports
@@ -588,12 +637,15 @@ struct dma_tx_state {
* @device_prep_dma_xor_val: prepares a xor validation operation
* @device_prep_dma_pq: prepares a pq operation
* @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
+ * @device_prep_dma_imm_data: DMAs 8 bytes of immediate data to the dst address
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -617,9 +669,10 @@ struct dma_device {
dma_cap_mask_t cap_mask;
unsigned short max_xor;
unsigned short max_pq;
- u8 copy_align;
- u8 xor_align;
- u8 pq_align;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -650,6 +703,12 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -669,6 +728,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+ struct dma_chan *chan, dma_addr_t dst, u64 data,
+ unsigned long flags);
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
@@ -745,6 +807,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags)
+{
+ if (!chan || !chan->device)
+ return NULL;
+
+ return chan->device->device_prep_dma_memset(chan, dest, value,
+ len, flags);
+}
+
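+
A client-side sketch of the new wrapper (the channel should actually advertise DMA_MEMSET capability; checks trimmed):

	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memset(chan, dst_phys, 0x00, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENXIO;	/* no channel/device, or prep failed */

	tx->callback = my_done_cb;	/* illustrative completion handler */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);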
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
@@ -790,7 +863,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
return desc->tx_submit(desc);
}
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+ size_t off1, size_t off2, size_t len)
{
size_t mask;
@@ -820,6 +894,12 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
@@ -874,6 +954,33 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
BUG();
}
+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+ size_t dir_icg)
+{
+ if (inc) {
+ if (dir_icg)
+ return dir_icg;
+ else if (sgl)
+ return icg;
+ }
+
+ return 0;
+}
+
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+ chunk->icg, chunk->dst_icg);
+}
+
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+ chunk->icg, chunk->src_icg);
+}
+
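+
The per-direction gap now takes precedence over the shared icg. A hedged sketch of a driver walking a template's chunks with the new helpers:

	size_t dst_off = 0, src_off = 0;
	int i;

	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = &xt->sgl[i];

		/* transfer chunk->size bytes, then skip the resolved gap */
		dst_off += chunk->size + dmaengine_get_dst_icg(xt, chunk);
		src_off += chunk->size + dmaengine_get_src_icg(xt, chunk);
	}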
/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
@@ -1079,6 +1186,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_slave_caps caps;
+
+ dma_get_slave_caps(tx->chan, &caps);
+
+ if (caps.descriptor_reuse) {
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
+ } else {
+ return -EPERM;
+ }
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+ /* this is supported for reusable desc, so check that */
+ if (dmaengine_desc_test_reuse(desc))
+ return desc->desc_free(desc);
+ else
+ return -EPERM;
+}
+
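+
Usage sketch for the reuse flags, assuming a channel whose driver reports descriptor_reuse and provides desc_free (buffer setup omitted):

	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (tx && dmaengine_desc_set_reuse(tx) == 0) {
		/* descriptor survives completion; resubmit as needed */
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		/* ... later, when truly done with it ... */
		dmaengine_desc_free(tx);
	}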
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
@@ -1093,7 +1233,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
- struct device *dev, char *name)
+ struct device *dev, const char *name)
{
struct dma_chan *chan;
@@ -1101,6 +1241,9 @@ static inline struct dma_chan
if (chan)
return chan;
+ if (!fn || !fn_param)
+ return NULL;
+
return __dma_request_channel(mask, fn, fn_param);
}
#endif /* DMAENGINE_H */
diff --git a/kernel/include/linux/dmapool.h b/kernel/include/linux/dmapool.h
index 52456aa56..53ba73750 100644
--- a/kernel/include/linux/dmapool.h
+++ b/kernel/include/linux/dmapool.h
@@ -11,8 +11,8 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/scatterlist.h>
#include <asm/io.h>
-#include <asm/scatterlist.h>
struct device;
@@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
/*
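
The new wrapper just forwards __GFP_ZERO, so an open-coded pair collapses to one call; a sketch:

	desc = dma_pool_zalloc(pool, GFP_ATOMIC, &desc_dma);
	if (!desc)
		return -ENOMEM;
	/* replaces: dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma) + memset() */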
diff --git a/kernel/include/linux/dmar.h b/kernel/include/linux/dmar.h
index 30624954d..e9bc9292b 100644
--- a/kernel/include/linux/dmar.h
+++ b/kernel/include/linux/dmar.h
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
struct irte {
union {
+ /* Shared between remapped and posted mode */
struct {
- __u64 present : 1,
- fpd : 1,
- dst_mode : 1,
- redir_hint : 1,
- trigger_mode : 1,
- dlvry_mode : 3,
- avail : 4,
- __reserved_1 : 4,
- vector : 8,
- __reserved_2 : 8,
- dest_id : 32;
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 6 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
};
__u64 low;
};
union {
+ /* Shared between remapped and posted mode */
struct {
- __u64 sid : 16,
- sq : 2,
- svt : 2,
- __reserved_3 : 44;
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
};
__u64 high;
};
};
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+ dst->present = src->present;
+ dst->fpd = src->fpd;
+ dst->avail = src->avail;
+ dst->pst = src->pst;
+ dst->vector = src->vector;
+ dst->sid = src->sid;
+ dst->sq = src->sq;
+ dst->svt = src->svt;
+}
+
+#define PDA_LOW_BIT 26
+#define PDA_HIGH_BIT 32
+
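+
dmar_copy_shared_irte() copies only the mode-agnostic fields (present, fpd, avail, pst, vector, sid, sq, svt); mode-specific bits are filled separately. A hedged sketch of deriving a posted-mode entry from an existing remapped one (remapped is illustrative):

	struct irte pi_irte;

	memset(&pi_irte, 0, sizeof(pi_irte));
	dmar_copy_shared_irte(&pi_irte, &remapped);	/* shared bits only */
	pi_irte.p_pst = 1;				/* then flag it posted */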
enum {
IRQ_REMAP_XAPIC_MODE,
IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
#endif /* __DMAR_H__ */
diff --git a/kernel/include/linux/dmi.h b/kernel/include/linux/dmi.h
index f820f0a33..5055ac341 100644
--- a/kernel/include/linux/dmi.h
+++ b/kernel/include/linux/dmi.h
@@ -2,6 +2,7 @@
#define __DMI_H__
#include <linux/list.h>
+#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
/* enum dmi_field is in mod_devicetable.h */
@@ -74,7 +75,7 @@ struct dmi_header {
u8 type;
u8 length;
u16 handle;
-};
+} __packed;
struct dmi_device {
struct list_head list;
@@ -93,6 +94,7 @@ struct dmi_dev_onboard {
int devfn;
};
+extern struct kobject *dmi_kobj;
extern int dmi_check_system(const struct dmi_system_id *list);
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
diff --git a/kernel/include/linux/dns_resolver.h b/kernel/include/linux/dns_resolver.h
index cc92268af..6ac3cad9a 100644
--- a/kernel/include/linux/dns_resolver.h
+++ b/kernel/include/linux/dns_resolver.h
@@ -27,7 +27,7 @@
#ifdef __KERNEL__
extern int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time_t *_expiry);
+ const char *options, char **_result, time64_t *_expiry);
#endif /* KERNEL */
diff --git a/kernel/include/linux/edac.h b/kernel/include/linux/edac.h
index da3b72e95..4fe67b853 100644
--- a/kernel/include/linux/edac.h
+++ b/kernel/include/linux/edac.h
@@ -769,12 +769,10 @@ struct mem_ctl_info {
/* the internal state of this controller instance */
int op_state;
-#ifdef CONFIG_EDAC_DEBUG
struct dentry *debugfs;
u8 fake_inject_layer[EDAC_MAX_LAYERS];
- u32 fake_inject_ue;
+ bool fake_inject_ue;
u16 fake_inject_count;
-#endif
};
/*
diff --git a/kernel/include/linux/efi.h b/kernel/include/linux/efi.h
index af5be0368..47be3ad7d 100644
--- a/kernel/include/linux/efi.h
+++ b/kernel/include/linux/efi.h
@@ -85,7 +85,8 @@ typedef struct {
#define EFI_MEMORY_MAPPED_IO 11
#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
#define EFI_PAL_CODE 13
-#define EFI_MAX_MEMORY_TYPE 14
+#define EFI_PERSISTENT_MEMORY 14
+#define EFI_MAX_MEMORY_TYPE 15
/* Attribute values: */
#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
@@ -96,6 +97,9 @@ typedef struct {
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
+#define EFI_MEMORY_MORE_RELIABLE \
+ ((u64)0x0000000000010000ULL) /* higher reliability */
+#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -583,12 +587,18 @@ void efi_native_runtime_setup(void);
#define EFI_FILE_INFO_ID \
EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+ EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
#define EFI_FILE_SYSTEM_GUID \
EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
#define DEVICE_TREE_GUID \
EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
+#define EFI_PROPERTIES_TABLE_GUID \
+ EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
+
typedef struct {
efi_guid_t guid;
u64 table;
@@ -670,7 +680,7 @@ typedef struct {
} efi_system_table_t;
struct efi_memory_map {
- void *phys_map;
+ phys_addr_t phys_map;
void *map;
void *map_end;
int nr_map;
@@ -802,6 +812,15 @@ typedef struct _efi_file_io_interface {
#define EFI_FILE_MODE_WRITE 0x0000000000000002
#define EFI_FILE_MODE_CREATE 0x8000000000000000
+typedef struct {
+ u32 version;
+ u32 length;
+ u64 memory_protection_attribute;
+} efi_properties_table_t;
+
+#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
+#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
+
#define EFI_INVALID_TABLE_ADDR (~0UL)
/*
@@ -823,6 +842,8 @@ extern struct efi {
unsigned long fw_vendor; /* fw_vendor */
unsigned long runtime; /* runtime table */
unsigned long config_table; /* config tables */
+ unsigned long esrt; /* ESRT table */
+ unsigned long properties_table; /* properties table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -864,6 +885,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern void efi_find_mirror(void);
#else
static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
@@ -875,6 +897,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
extern int efi_config_parse_tables(void *config_tables, int count, int sz,
efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
@@ -882,16 +909,25 @@ extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
-extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
+extern int efi_get_fdt_params(struct efi_fdt_params *params);
extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
+#ifdef CONFIG_EFI_FAKE_MEMMAP
+extern void __init efi_fake_memmap(void);
+#else
+static inline void efi_fake_memmap(void) { }
+#endif
+
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc(m, md) \
for ((md) = (m)->map; \
@@ -943,6 +979,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
#define EFI_DBG 8 /* Print additional debug info at runtime */
+#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
#ifdef CONFIG_EFI
/*
@@ -1162,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);
-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+ size_t len);
extern struct work_struct efivar_work;
void efivar_run_worker(void);
diff --git a/kernel/include/linux/elevator.h b/kernel/include/linux/elevator.h
index 45a914744..638b324f0 100644
--- a/kernel/include/linux/elevator.h
+++ b/kernel/include/linux/elevator.h
@@ -39,6 +39,7 @@ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct reques
typedef int (elevator_init_fn) (struct request_queue *,
struct elevator_type *e);
typedef void (elevator_exit_fn) (struct elevator_queue *);
+typedef void (elevator_registered_fn) (struct request_queue *);
struct elevator_ops
{
@@ -68,6 +69,7 @@ struct elevator_ops
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
+ elevator_registered_fn *elevator_registered_fn;
};
#define ELV_NAME_MAX (16)
diff --git a/kernel/include/linux/enclosure.h b/kernel/include/linux/enclosure.h
index 7be22da32..a4cf57cd0 100644
--- a/kernel/include/linux/enclosure.h
+++ b/kernel/include/linux/enclosure.h
@@ -29,7 +29,11 @@
/* A few generic types ... taken from ses-2 */
enum enclosure_component_type {
ENCLOSURE_COMPONENT_DEVICE = 0x01,
+ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
+ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
+ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
+ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
};
/* ses-2 common element status */
diff --git a/kernel/include/linux/etherdevice.h b/kernel/include/linux/etherdevice.h
index 606563ef8..eb049c622 100644
--- a/kernel/include/linux/etherdevice.h
+++ b/kernel/include/linux/etherdevice.h
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
- ((a[2] ^ b[2]) & m)) == 0;
+ (__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
@@ -110,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
{
- return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ u32 a = *(const u32 *)addr;
+#else
+ u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+ return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+ return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+ return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+ return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+ return is_multicast_ether_addr(addr);
+#endif
}
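
The 64-bit variant dereferences a full 8 bytes, which the addr[6+2] prototype advertises: callers must guarantee two bytes of tail room past the address. Ethernet headers do, so a hedged sketch:

	static bool rx_is_multicast(const struct sk_buff *skb)
	{
		/* h_dest heads a >= 14-byte header, so an 8-byte read is safe */
		return is_multicast_ether_addr_64bits(eth_hdr(skb)->h_dest);
	}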
/**
@@ -169,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
}
/**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the value is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+ /* if CPU is little endian mask off bits representing LSB */
+ proto &= htons(0xFF00);
+#endif
+ /* cast both to u16 and compare since LSB can be ignored */
+ return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
+
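+
A sketch of the intended dispatch on the returned value (the caller is illustrative):

	__be16 proto = eth_hdr(skb)->h_proto;

	if (eth_proto_is_802_3(proto)) {
		/* a real Ethertype: dispatch on the protocol number */
	} else {
		/* an 802.3 length field: payload is LLC/SNAP framed */
	}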
+/**
* eth_random_addr - Generate software assigned random Ethernet address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
diff --git a/kernel/include/linux/extcon.h b/kernel/include/linux/extcon.h
index 36f49c405..7abf674c3 100644
--- a/kernel/include/linux/extcon.h
+++ b/kernel/include/linux/extcon.h
@@ -1,6 +1,9 @@
/*
* External connector (extcon) class driver
*
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
* Copyright (C) 2012 Samsung Electronics
* Author: Donggeun Kim <dg77.kim@samsung.com>
* Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -24,53 +27,46 @@
#define __LINUX_EXTCON_H__
#include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/sysfs.h>
-
-#define SUPPORTED_CABLE_MAX 32
-#define CABLE_NAME_MAX 30
/*
- * The standard cable name is to help support general notifier
- * and notifiee device drivers to share the common names.
- * Please use standard cable names unless your notifier device has
- * a very unique and abnormal cable or
- * the cable type is supposed to be used with only one unique
- * pair of notifier/notifiee devices.
- *
- * Please add any other "standard" cables used with extcon dev.
- *
- * You may add a dot and number to specify version or specification
- * of the specific cable if it is required. (e.g., "Fast-charger.18"
- * and "Fast-charger.10" for 1.8A and 1.0A chargers)
- * However, the notifiee and notifier should be able to handle such
- * string and if the notifiee can negotiate the protocol or identify,
- * you don't need such convention. This convention is helpful when
- * notifier can distinguish but notifiee cannot.
+ * Define the unique id of supported external connectors
*/
-enum extcon_cable_name {
- EXTCON_USB = 0,
- EXTCON_USB_HOST,
- EXTCON_TA, /* Travel Adaptor */
- EXTCON_FAST_CHARGER,
- EXTCON_SLOW_CHARGER,
- EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
- EXTCON_HDMI,
- EXTCON_MHL,
- EXTCON_DVI,
- EXTCON_VGA,
- EXTCON_DOCK,
- EXTCON_LINE_IN,
- EXTCON_LINE_OUT,
- EXTCON_MIC_IN,
- EXTCON_HEADPHONE_OUT,
- EXTCON_SPDIF_IN,
- EXTCON_SPDIF_OUT,
- EXTCON_VIDEO_IN,
- EXTCON_VIDEO_OUT,
- EXTCON_MECHANICAL,
-};
-extern const char extcon_cable_name[][CABLE_NAME_MAX + 1];
+#define EXTCON_NONE 0
+
+/* USB external connector */
+#define EXTCON_USB 1
+#define EXTCON_USB_HOST 2
+
+/* Charging external connector */
+#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */
+#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */
+#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */
+#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */
+#define EXTCON_CHG_USB_FAST 9
+#define EXTCON_CHG_USB_SLOW 10
+
+/* Jack external connector */
+#define EXTCON_JACK_MICROPHONE 20
+#define EXTCON_JACK_HEADPHONE 21
+#define EXTCON_JACK_LINE_IN 22
+#define EXTCON_JACK_LINE_OUT 23
+#define EXTCON_JACK_VIDEO_IN 24
+#define EXTCON_JACK_VIDEO_OUT 25
+#define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */
+#define EXTCON_JACK_SPDIF_OUT 27
+
+/* Display external connector */
+#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */
+#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
+#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */
+#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
+
+/* Miscellaneous external connector */
+#define EXTCON_DOCK 60
+#define EXTCON_JIG 61
+#define EXTCON_MECHANICAL 62
+
+#define EXTCON_NUM 63
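+
With string names gone, drivers describe their connectors as an EXTCON_NONE-terminated id array; a sketch against the reworked allocation API:

	static const unsigned int my_cables[] = {
		EXTCON_USB,
		EXTCON_USB_HOST,
		EXTCON_NONE,		/* terminator */
	};

	edev = devm_extcon_dev_allocate(dev, my_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);
	return devm_extcon_dev_register(dev, edev);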
struct extcon_cable;
@@ -78,7 +74,7 @@ struct extcon_cable;
* struct extcon_dev - An extcon device represents one external connector.
* @name: The name of this extcon device. Parent device name is
* used if NULL.
- * @supported_cable: Array of supported cable names ending with NULL.
+ * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
@@ -89,16 +85,12 @@ struct extcon_cable;
* be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_name: An optional callback to override the method to print the
- * name of the extcon device.
- * @print_state: An optional callback to override the method to print the
- * status of the extcon device.
* @dev: Device of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
* @nh: Notifier for the state change events from this extcon
- * @entry: To support list of extcon devices so that users can search
- * for extcon devices based on the extcon name.
+ * @entry: To support list of extcon devices so that users can
+ * search for extcon devices based on the extcon name.
* @lock:
* @max_supported: Internal value to store the number of cables.
* @extcon_dev_type: Device_type struct to provide attribute_groups
@@ -113,16 +105,12 @@ struct extcon_cable;
struct extcon_dev {
/* Optional user initializing data */
const char *name;
- const char **supported_cable;
+ const unsigned int *supported_cable;
const u32 *mutually_exclusive;
- /* Optional callbacks to override class functions */
- ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
- ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
-
/* Internal data. Please do not set. */
struct device dev;
- struct raw_notifier_head nh;
+ struct raw_notifier_head *nh;
struct list_head entry;
int max_supported;
spinlock_t lock; /* could be called by irq handler */
@@ -161,8 +149,6 @@ struct extcon_cable {
/**
* struct extcon_specific_cable_nb - An internal data for
* extcon_register_interest().
- * @internal_nb: A notifier block bridging extcon notifier
- * and cable notifier.
* @user_nb: user provided notifier block for events from
* a specific cable.
* @cable_index: the target cable.
@@ -170,7 +156,6 @@ struct extcon_cable {
* @previous_value: the saved previous event value.
*/
struct extcon_specific_cable_nb {
- struct notifier_block internal_nb;
struct notifier_block *user_nb;
int cable_index;
struct extcon_dev *edev;
@@ -194,10 +179,10 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
/*
* Following APIs control the memory of extcon device.
*/
-extern struct extcon_dev *extcon_dev_allocate(const char **cables);
+extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **cables);
+ const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/*
@@ -216,13 +201,10 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
/*
* get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable_name
- * or cable_index, which is retrieved by extcon_find_cable_index
+ * They are used to access the status of each cable based on its unique id.
*/
-extern int extcon_find_cable_index(struct extcon_dev *sdev,
- const char *cable_name);
-extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
-extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
+extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
+extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
bool cable_state);
extern int extcon_get_cable_state(struct extcon_dev *edev,
@@ -249,16 +231,21 @@ extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
* we do not recommend to use this for normal 'notifiee' device drivers who
* want to be notified by a specific external port of the notifier.
*/
-extern int extcon_register_notifier(struct extcon_dev *edev,
+extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb);
/*
* Following API get the extcon device from devicetree.
* This function use phandle of devicetree to get extcon device directly.
*/
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index);
+extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+ int index);
+
+/* Following API to get information of extcon device */
+extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -276,7 +263,7 @@ static inline int devm_extcon_dev_register(struct device *dev,
static inline void devm_extcon_dev_unregister(struct device *dev,
struct extcon_dev *edev) { }
-static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
+static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -284,7 +271,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
static inline void extcon_dev_free(struct extcon_dev *edev) { }
static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **cables)
+ const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -307,20 +294,14 @@ static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
return 0;
}
-static inline int extcon_find_cable_index(struct extcon_dev *edev,
- const char *cable_name)
-{
- return 0;
-}
-
static inline int extcon_get_cable_state_(struct extcon_dev *edev,
- int cable_index)
+ unsigned int id)
{
return 0;
}
static inline int extcon_set_cable_state_(struct extcon_dev *edev,
- int cable_index, bool cable_state)
+ unsigned int id, bool cable_state)
{
return 0;
}
@@ -343,13 +324,15 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
}
static inline int extcon_register_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+ unsigned int id,
+ struct notifier_block *nb)
{
return 0;
}
static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+ unsigned int id,
+ struct notifier_block *nb)
{
return 0;
}
diff --git a/kernel/include/linux/extcon/extcon-adc-jack.h b/kernel/include/linux/extcon/extcon-adc-jack.h
index 9ca958c4e..53c60806b 100644
--- a/kernel/include/linux/extcon/extcon-adc-jack.h
+++ b/kernel/include/linux/extcon/extcon-adc-jack.h
@@ -44,7 +44,7 @@ struct adc_jack_cond {
* @consumer_channel: Unique name to identify the channel on the consumer
* side. This typically describes the channels used within
* the consumer. E.g. 'battery_voltage'
- * @cable_names: array of cable names ending with null.
+ * @cable_names: array of extcon ids for supported cables.
* @adc_conditions: array of struct adc_jack_cond conditions ending
* with .state = 0 entry. This describes how to decode
* adc values into extcon state.
@@ -58,8 +58,7 @@ struct adc_jack_pdata {
const char *name;
const char *consumer_channel;
- /* The last entry should be NULL */
- const char **cable_names;
+ const enum extcon *cable_names;
/* The last entry's state should be 0 */
struct adc_jack_cond *adc_conditions;
diff --git a/kernel/include/linux/extcon/extcon-gpio.h b/kernel/include/linux/extcon/extcon-gpio.h
index 0b17ad43f..7cacafb78 100644
--- a/kernel/include/linux/extcon/extcon-gpio.h
+++ b/kernel/include/linux/extcon/extcon-gpio.h
@@ -1,5 +1,5 @@
/*
- * External connector (extcon) class generic GPIO driver
+ * Single-state GPIO extcon driver based on extcon class
*
* Copyright (C) 2012 Samsung Electronics
* Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -16,43 +16,31 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
-*/
+ */
#ifndef __EXTCON_GPIO_H__
#define __EXTCON_GPIO_H__ __FILE__
#include <linux/extcon.h>
/**
- * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device.
- * @name: The name of this GPIO extcon device.
+ * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device.
+ * @extcon_id: The unique id of specific external connector.
* @gpio: Corresponding GPIO.
* @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
* If true, low state of gpio means active.
* If false, high state of gpio means active.
* @debounce: Debounce time for GPIO IRQ in ms.
* @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
- * @state_on: print_state is overriden with state_on if attached.
- * If NULL, default method of extcon class is used.
- * @state_off: print_state is overriden with state_off if detached.
- * If NUll, default method of extcon class is used.
* @check_on_resume: Boolean describing whether to check the state of gpio
* while resuming from sleep.
- *
- * Note that in order for state_on or state_off to be valid, both state_on
- * and state_off should be not NULL. If at least one of them is NULL,
- * the print_state is not overriden.
*/
-struct gpio_extcon_platform_data {
- const char *name;
+struct gpio_extcon_pdata {
+ unsigned int extcon_id;
unsigned gpio;
bool gpio_active_low;
unsigned long debounce;
unsigned long irq_flags;
- /* if NULL, "0" or "1" will be printed */
- const char *state_on;
- const char *state_off;
bool check_on_resume;
};
diff --git a/kernel/include/linux/f2fs_fs.h b/kernel/include/linux/f2fs_fs.h
index 591f8c3ef..25c6324a0 100644
--- a/kernel/include/linux/f2fs_fs.h
+++ b/kernel/include/linux/f2fs_fs.h
@@ -50,6 +50,8 @@
#define MAX_ACTIVE_NODE_LOGS 8
#define MAX_ACTIVE_DATA_LOGS 8
+#define VERSION_LEN 256
+
/*
* For superblock
*/
@@ -86,6 +88,12 @@ struct f2fs_super_block {
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
+ __u8 version[VERSION_LEN]; /* the kernel version */
+ __u8 init_version[VERSION_LEN]; /* the initial kernel version */
+ __le32 feature; /* defined features */
+ __u8 encryption_level; /* versioning level for encryption */
+ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
+ __u8 reserved[871]; /* valid reserved region */
} __packed;
/*
@@ -409,15 +417,25 @@ typedef __le32 f2fs_hash_t;
#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK 214
-
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+/*
+ * space utilization of regular dentry and inline dentry
+ * regular dentry inline dentry
+ * bitmap 1 * 27 = 27 1 * 23 = 23
+ * reserved 1 * 3 = 3 1 * 7 = 7
+ * dentry 11 * 214 = 2354 11 * 182 = 2002
+ * filename 8 * 214 = 1712 8 * 182 = 1456
+ * total 4096 3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry; when converting an inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK 214 /* the number of dentries in a block */
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
diff --git a/kernel/include/linux/fault-inject.h b/kernel/include/linux/fault-inject.h
index 798fad9e4..3159a7dba 100644
--- a/kernel/include/linux/fault-inject.h
+++ b/kernel/include/linux/fault-inject.h
@@ -18,7 +18,7 @@ struct fault_attr {
atomic_t times;
atomic_t space;
unsigned long verbose;
- u32 task_filter;
+ bool task_filter;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
diff --git a/kernel/include/linux/fb.h b/kernel/include/linux/fb.h
index 043f3283b..3d003805a 100644
--- a/kernel/include/linux/fb.h
+++ b/kernel/include/linux/fb.h
@@ -156,7 +156,7 @@ struct fb_cursor_user {
#define FB_EVENT_GET_REQ 0x0D
/* Unbind from the console if possible */
#define FB_EVENT_FB_UNBIND 0x0E
-/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
@@ -483,7 +483,10 @@ struct fb_info {
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
#endif
- char __iomem *screen_base; /* Virtual address */
+ union {
+ char __iomem *screen_base; /* Virtual address */
+ char *screen_buffer;
+ };
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
@@ -788,7 +791,7 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[64];
+extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
diff --git a/kernel/include/linux/fdtable.h b/kernel/include/linux/fdtable.h
index 230f87bdf..5295535b6 100644
--- a/kernel/include/linux/fdtable.h
+++ b/kernel/include/linux/fdtable.h
@@ -26,6 +26,7 @@ struct fdtable {
struct file __rcu **fd; /* current fd array */
unsigned long *close_on_exec;
unsigned long *open_fds;
+ unsigned long *full_fds_bits;
struct rcu_head rcu;
};
@@ -47,6 +48,9 @@ struct files_struct {
* read mostly part
*/
atomic_t count;
+ bool resize_in_progress;
+ wait_queue_head_t resize_wait;
+
struct fdtable __rcu *fdt;
struct fdtable fdtab;
/*
@@ -56,6 +60,7 @@ struct files_struct {
int next_fd;
unsigned long close_on_exec_init[1];
unsigned long open_fds_init[1];
+ unsigned long full_fds_bits_init[1];
struct file __rcu * fd_array[NR_OPEN_DEFAULT];
};
@@ -83,8 +88,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
{
- rcu_lockdep_assert(rcu_read_lock_held() ||
- lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+ !lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
return __fcheck_files(files, fd);
}
diff --git a/kernel/include/linux/fence.h b/kernel/include/linux/fence.h
index 39efee130..bb5220113 100644
--- a/kernel/include/linux/fence.h
+++ b/kernel/include/linux/fence.h
@@ -280,6 +280,22 @@ fence_is_signaled(struct fence *fence)
}
/**
+ * fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool fence_is_later(struct fence *f1, struct fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return false;
+
+ return f1->seqno - f2->seqno < INT_MAX;
+}
+
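+
The unsigned subtraction keeps the ordering correct across seqno wrap; a small worked case:

	/* u32 seqnos near the wrap point:
	 * f1->seqno = 2, f2->seqno = 0xfffffffe
	 * 2 - 0xfffffffe == 4 (mod 2^32) < INT_MAX  =>  f1 is later
	 */
	if (fence_is_later(f1, f2))
		pr_debug("f1 was issued after f2\n");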
+/**
* fence_later - return the chronologically later fence
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
@@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
* set if enable_signaling wasn't called, and enabling that here is
* overkill.
*/
- if (f2->seqno - f1->seqno <= INT_MAX)
- return fence_is_signaled(f2) ? NULL : f2;
- else
+ if (fence_is_later(f1, f2))
return fence_is_signaled(f1) ? NULL : f1;
+ else
+ return fence_is_signaled(f2) ? NULL : f2;
}
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-
+signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
+ bool intr, signed long timeout);
/**
* fence_wait - sleep until the fence gets signaled
diff --git a/kernel/include/linux/filter.h b/kernel/include/linux/filter.h
index fa11b3a36..5972ffe57 100644
--- a/kernel/include/linux/filter.h
+++ b/kernel/include/linux/filter.h
@@ -12,6 +12,8 @@
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <net/sch_generic.h>
#include <asm/cacheflush.h>
@@ -207,6 +209,16 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
@@ -267,6 +279,14 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K) \
+ ((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF) \
+ ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
#define bytes_to_bpf_size(bytes) \
({ \
int bpf_size = -EINVAL; \
@@ -283,10 +303,6 @@ struct bpf_prog_aux;
bpf_size; \
})
-/* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx) \
- (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
-
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
@@ -307,8 +323,12 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
- bool jited; /* Is our filter JIT'ed? */
- bool gpl_compatible; /* Is our filter GPL compatible? */
+ kmemcheck_bitfield_begin(meta);
+ u16 jited:1, /* Is our filter JIT'ed? */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1; /* Do we need dst entry? */
+ kmemcheck_bitfield_end(meta);
u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
@@ -330,12 +350,55 @@ struct sk_filter {
#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ u8 *cb_data = qdisc_skb_cb(skb)->data;
+ u8 saved_cb[QDISC_CB_PRIV_LEN];
+ u32 res;
+
+ BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
+ QDISC_CB_PRIV_LEN);
+
+ if (unlikely(prog->cb_access)) {
+ memcpy(saved_cb, cb_data, sizeof(saved_cb));
+ memset(cb_data, 0, sizeof(saved_cb));
+ }
+
+ res = BPF_PROG_RUN(prog, skb);
+
+ if (unlikely(prog->cb_access))
+ memcpy(cb_data, saved_cb, sizeof(saved_cb));
+
+ return res;
+}
+
+static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ u8 *cb_data = qdisc_skb_cb(skb)->data;
+
+ if (unlikely(prog->cb_access))
+ memset(cb_data, 0, QDISC_CB_PRIV_LEN);
+ return BPF_PROG_RUN(prog, skb);
+}
+
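+
The two runners target different call sites: callers that must hand skb->cb back intact save and restore it, while callers that own the cb area only need it zeroed on entry. A hedged sketch:

	/* socket-filter style path: preserve the owner's cb contents */
	res = bpf_prog_run_save_cb(filter->prog, skb);

	/* tc-style path: cb is scratch here, zero-on-entry is enough */
	res = bpf_prog_run_clear_cb(prog, skb);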
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
offsetof(struct bpf_prog, insns[proglen]));
}
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+ /* When classic BPF programs have been loaded and the arch
+ * does not have a classic BPF JIT (anymore), they have been
+ * converted via bpf_migrate_filter() to eBPF and thus always
+ * have an unspec program type.
+ */
+ return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -360,12 +423,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
int sk_filter(struct sock *sk, struct sk_buff *skb);
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);
-int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len);
-
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -377,14 +437,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
__bpf_prog_free(fp);
}
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+ unsigned int flen);
+
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -393,6 +456,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -409,8 +473,9 @@ void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
+ pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ proglen, pass, image, current->comm, task_pid_nr(current));
+
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
@@ -428,6 +493,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp)
#define BPF_ANC BIT(15)
+static inline bool bpf_needs_clear_a(const struct sock_filter *first)
+{
+ switch (first->code) {
+ case BPF_RET | BPF_K:
+ case BPF_LD | BPF_W | BPF_LEN:
+ return false;
+
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
+ if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
+ return true;
+ return false;
+
+ default:
+ return true;
+ }
+}
+
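As a sketch, an arch's classic-BPF JIT prologue could use this to elide the A-register clear when the first instruction provably overwrites A (emit_a_clear() is a hypothetical arch helper):

	/* prog points at the classic filter's first instruction */
	if (bpf_needs_clear_a(&prog[0]))
		emit_a_clear(ctx);	/* emit "A = 0" */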
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
BUG_ON(ftest->code & BPF_ANC);
diff --git a/kernel/include/linux/fpga/fpga-mgr.h b/kernel/include/linux/fpga/fpga-mgr.h
new file mode 100644
index 000000000..0940bf45e
--- /dev/null
+++ b/kernel/include/linux/fpga/fpga-mgr.h
@@ -0,0 +1,127 @@
+/*
+ * FPGA Framework
+ *
+ * Copyright (C) 2013-2015 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#ifndef _LINUX_FPGA_MGR_H
+#define _LINUX_FPGA_MGR_H
+
+struct fpga_manager;
+
+/**
+ * enum fpga_mgr_states - fpga framework states
+ * @FPGA_MGR_STATE_UNKNOWN: can't determine state
+ * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off
+ * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up
+ * @FPGA_MGR_STATE_RESET: FPGA in reset state
+ * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
+ * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
+ * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_MGR_STATE_WRITE: writing image to FPGA
+ * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating
+ */
+enum fpga_mgr_states {
+ /* default FPGA states */
+ FPGA_MGR_STATE_UNKNOWN,
+ FPGA_MGR_STATE_POWER_OFF,
+ FPGA_MGR_STATE_POWER_UP,
+ FPGA_MGR_STATE_RESET,
+
+ /* getting an image for loading */
+ FPGA_MGR_STATE_FIRMWARE_REQ,
+ FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
+
+ /* write sequence: init, write, complete */
+ FPGA_MGR_STATE_WRITE_INIT,
+ FPGA_MGR_STATE_WRITE_INIT_ERR,
+ FPGA_MGR_STATE_WRITE,
+ FPGA_MGR_STATE_WRITE_ERR,
+ FPGA_MGR_STATE_WRITE_COMPLETE,
+ FPGA_MGR_STATE_WRITE_COMPLETE_ERR,
+
+ /* fpga is programmed and operating */
+ FPGA_MGR_STATE_OPERATING,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+
+/**
+ * struct fpga_manager_ops - ops for low level fpga manager drivers
+ * @state: returns an enum value of the FPGA's state
+ * @write_init: prepare the FPGA to receive configuration data
+ * @write: write count bytes of configuration data to the FPGA
+ * @write_complete: set FPGA to operating state after writing is done
+ * @fpga_remove: optional: Set FPGA into a specific state during driver remove
+ *
+ * fpga_manager_ops are the low level functions implemented by a specific
+ * fpga manager driver. The optional ones are tested for NULL before being
+ * called, so leaving them out is fine.
+ */
+struct fpga_manager_ops {
+ enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
+ int (*write_init)(struct fpga_manager *mgr, u32 flags,
+ const char *buf, size_t count);
+ int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
+ int (*write_complete)(struct fpga_manager *mgr, u32 flags);
+ void (*fpga_remove)(struct fpga_manager *mgr);
+};
+
+/**
+ * struct fpga_manager - fpga manager structure
+ * @name: name of low level fpga manager
+ * @dev: fpga manager device
+ * @ref_mutex: only allows one reference to fpga manager
+ * @state: state of fpga manager
+ * @mops: pointer to struct of fpga manager ops
+ * @priv: low level driver private data
+ */
+struct fpga_manager {
+ const char *name;
+ struct device dev;
+ struct mutex ref_mutex;
+ enum fpga_mgr_states state;
+ const struct fpga_manager_ops *mops;
+ void *priv;
+};
+
+#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
+
+int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+ const char *buf, size_t count);
+
+int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+ const char *image_name);
+
+struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+
+void fpga_mgr_put(struct fpga_manager *mgr);
+
+int fpga_mgr_register(struct device *dev, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
+
+void fpga_mgr_unregister(struct device *dev);
+
+#endif /* _LINUX_FPGA_MGR_H */
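To illustrate the contract above, a minimal hypothetical low level driver (mydrv_* names are assumptions, hardware access elided) fills in the mandatory ops and registers from probe:

	static enum fpga_mgr_states mydrv_state(struct fpga_manager *mgr)
	{
		return FPGA_MGR_STATE_UNKNOWN;	/* query hardware here */
	}

	static int mydrv_write_init(struct fpga_manager *mgr, u32 flags,
				    const char *buf, size_t count)
	{
		return 0;	/* put the device into programming mode */
	}

	static int mydrv_write(struct fpga_manager *mgr, const char *buf,
			       size_t count)
	{
		return 0;	/* push count bytes of the image */
	}

	static int mydrv_write_complete(struct fpga_manager *mgr, u32 flags)
	{
		return 0;	/* wait for the operating state */
	}

	static const struct fpga_manager_ops mydrv_ops = {
		.state		= mydrv_state,
		.write_init	= mydrv_write_init,
		.write		= mydrv_write,
		.write_complete	= mydrv_write_complete,
	};

	static int mydrv_probe(struct platform_device *pdev)
	{
		return fpga_mgr_register(&pdev->dev, "mydrv FPGA manager",
					 &mydrv_ops, NULL);
	}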
diff --git a/kernel/include/linux/frontswap.h b/kernel/include/linux/frontswap.h
index 829326240..e65ef9595 100644
--- a/kernel/include/linux/frontswap.h
+++ b/kernel/include/linux/frontswap.h
@@ -6,16 +6,16 @@
#include <linux/bitops.h>
struct frontswap_ops {
- void (*init)(unsigned);
- int (*store)(unsigned, pgoff_t, struct page *);
- int (*load)(unsigned, pgoff_t, struct page *);
- void (*invalidate_page)(unsigned, pgoff_t);
- void (*invalidate_area)(unsigned);
+ void (*init)(unsigned); /* this swap type was just swapon'ed */
+ int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
+ int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+ void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
+ void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
+ struct frontswap_ops *next; /* private pointer to next ops */
};
extern bool frontswap_enabled;
-extern struct frontswap_ops *
- frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_register_ops(struct frontswap_ops *ops);
extern void frontswap_shrink(unsigned long);
extern unsigned long frontswap_curr_pages(void);
extern void frontswap_writethrough(bool);
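Under the new scheme a backend hands its ops to the core, which chains registrations through the private ->next pointer instead of returning the previous ops to the caller; a hypothetical backend (my_* names assumed) registers like this:

	static void my_init(unsigned type) { }
	static int my_store(unsigned type, pgoff_t offset, struct page *page)
	{
		return -1;	/* non-zero: page was not stored */
	}
	static int my_load(unsigned type, pgoff_t offset, struct page *page)
	{
		return -1;	/* non-zero: page not present */
	}
	static void my_invalidate_page(unsigned type, pgoff_t offset) { }
	static void my_invalidate_area(unsigned type) { }

	static struct frontswap_ops my_frontswap_ops = {
		.init		 = my_init,
		.store		 = my_store,
		.load		 = my_load,
		.invalidate_page = my_invalidate_page,
		.invalidate_area = my_invalidate_area,
	};

	static int __init my_backend_init(void)
	{
		frontswap_register_ops(&my_frontswap_ops);
		return 0;
	}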
diff --git a/kernel/include/linux/fs.h b/kernel/include/linux/fs.h
index 571aab91b..3aa514254 100644
--- a/kernel/include/linux/fs.h
+++ b/kernel/include/linux/fs.h
@@ -1,7 +1,6 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
-
#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/kdev_t.h>
@@ -30,15 +29,17 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/blk_types.h>
+#include <linux/workqueue.h>
+#include <linux/percpu-rwsem.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
struct backing_dev_info;
+struct bdi_writeback;
struct export_operations;
struct hd_geometry;
struct iovec;
-struct nameidata;
struct kiocb;
struct kobject;
struct pipe_inode_info;
@@ -51,11 +52,11 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
@@ -70,6 +71,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
+typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
#define MAY_EXEC 0x00000001
#define MAY_WRITE 0x00000002
@@ -634,7 +636,15 @@ struct inode {
unsigned long dirtied_time_when;
struct hlist_node i_hash;
- struct list_head i_wb_list; /* backing dev IO list */
+ struct list_head i_io_list; /* backing dev IO list */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct bdi_writeback *i_wb; /* the associated cgroup wb */
+
+ /* foreign inode detection, see wbc_detach_inode() */
+ int i_wb_frn_winner;
+ u16 i_wb_frn_avg_time;
+ u16 i_wb_frn_history;
+#endif
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
union {
@@ -656,6 +666,7 @@ struct inode {
struct pipe_inode_info *i_pipe;
struct block_device *i_bdev;
struct cdev *i_cdev;
+ char *i_link;
};
__u32 i_generation;
@@ -932,12 +943,18 @@ struct lock_manager_operations {
struct lock_manager {
struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
};
struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(struct net *);
+int opens_in_grace(struct net *);
/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>
@@ -1036,12 +1053,11 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1127,11 +1143,6 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
-{
- return -ENOLCK;
-}
-
static inline int posix_unblock_lock(struct file_lock *waiter)
{
return -ENOENT;
@@ -1153,8 +1164,7 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
return 0;
}
-static inline int flock_lock_file_wait(struct file *filp,
- struct file_lock *request)
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
return -ENOLCK;
}
@@ -1192,6 +1202,15 @@ static inline void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files) {}
#endif /* !CONFIG_FILE_LOCKING */
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_inode;
+}
+
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return locks_lock_inode_wait(file_inode(filp), fl);
+}
struct fasync_struct {
spinlock_t fa_lock;
@@ -1232,6 +1251,9 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
/* Possible states of 'frozen' field */
enum {
@@ -1246,16 +1268,9 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
- /* Counters for counting writers at each level */
- struct percpu_counter counter[SB_FREEZE_LEVELS];
- wait_queue_head_t wait; /* queue for waiting for
- writers / faults to finish */
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* queue for waiting for
- sb to be thawed */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map lock_map[SB_FREEZE_LEVELS];
-#endif
+ int frozen; /* Is sb frozen? */
+ wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
+ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
struct super_block {
@@ -1270,6 +1285,7 @@ struct super_block {
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
@@ -1280,7 +1296,6 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
- struct list_head s_inodes; /* all inodes */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1346,11 +1361,18 @@ struct super_block {
struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
+ struct work_struct destroy_work;
+
+ struct mutex s_sync_lock; /* sync serialisation lock */
/*
* Indicates how deep in a filesystem stack this SB is
*/
int s_stack_depth;
+
+ /* s_inode_list_lock protects s_inodes */
+ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
+ struct list_head s_inodes; /* all inodes */
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1362,6 +1384,11 @@ extern struct timespec current_fs_time(struct super_block *sb);
void __sb_end_write(struct super_block *sb, int level);
int __sb_start_write(struct super_block *sb, int level, bool wait);
+#define __sb_writers_acquired(sb, lev) \
+ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev) \
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+
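These annotations exist so that a held freeze level can change owners without tripping lockdep; a sketch of the assumed hand-off, e.g. a transaction passed between tasks:

	/* the current task gives up lockdep ownership of the level... */
	__sb_writers_release(sb, SB_FREEZE_WRITE);
	/* ...and the inheriting task re-asserts it before the matching
	 * sb_end_write() eventually runs in its context */
	__sb_writers_acquired(sb, SB_FREEZE_WRITE);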
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
@@ -1582,7 +1609,6 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -1607,12 +1633,12 @@ struct file_operations {
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
- void * (*follow_link) (struct dentry *, struct nameidata *);
+ const char * (*follow_link) (struct dentry *, void **);
int (*permission) (struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
int (*readlink) (struct dentry *, char __user *,int);
- void (*put_link) (struct dentry *, struct nameidata *, void *);
+ void (*put_link) (struct inode *, void *);
int (*create) (struct inode *,struct dentry *, umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1639,9 +1665,6 @@ struct inode_operations {
umode_t create_mode, int *opened);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
-
- /* WARNING: probably going away soon, do not use! */
- int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
} ____cacheline_aligned;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1806,6 +1829,11 @@ struct super_operations {
*
* I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
*
+ * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
+ * synchronize competing switching instances and to tell
+ * wb stat updates to grab mapping->tree_lock. See
+ * inode_switch_wb_work_fn() for details.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -1825,6 +1853,7 @@ struct super_operations {
#define I_DIRTY_TIME (1 << 11)
#define __I_DIRTY_TIME_EXPIRED 12
#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
+#define I_WB_SWITCH (1 << 13)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
@@ -1879,6 +1908,7 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -1992,11 +2022,6 @@ extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern int generic_update_time(struct inode *, struct timespec *, int);
-static inline struct inode *file_inode(const struct file *f)
-{
- return f->f_inode;
-}
-
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2193,7 +2218,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
@@ -2217,7 +2241,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
extern struct kmem_cache *names_cachep;
@@ -2240,7 +2264,13 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
-extern int sb_is_blkdev_sb(struct super_block *sb);
+
+extern struct super_block *blockdev_superblock;
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+ return sb == blockdev_superblock;
+}
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2279,6 +2309,9 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+extern int __blkdev_reread_part(struct block_device *bdev);
+extern int blkdev_reread_part(struct block_device *bdev);
+
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
extern void bd_unlink_disk_holder(struct block_device *bdev,
@@ -2374,6 +2407,7 @@ extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait(struct address_space *);
+extern void filemap_fdatawait_keep_errors(struct address_space *);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
@@ -2501,6 +2535,8 @@ extern struct file * open_exec(const char *);
extern int is_subdir(struct dentry *, struct dentry *);
extern int path_is_under(struct path *, struct path *);
+extern char *file_path(struct file *, char *, int);
+
#include <linux/err.h>
/* needed for stackable file system support */
@@ -2552,7 +2588,12 @@ extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
-extern int file_remove_suid(struct file *);
+extern int file_remove_privs(struct file *);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
+static inline int file_needs_remove_privs(struct file *file)
+{
+ return dentry_needs_remove_privs(file->f_path.dentry);
+}
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -2563,14 +2604,14 @@ static inline void insert_inode_hash(struct inode *inode)
extern void __remove_inode_hash(struct inode *);
static inline void remove_inode_hash(struct inode *inode)
{
- if (!inode_unhashed(inode))
+ if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}
extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
-extern void submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(int, struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
extern int set_blocksize(struct block_device *, int);
@@ -2622,15 +2663,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
- get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
-
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset);
@@ -2704,13 +2736,14 @@ extern const struct file_operations generic_ro_fops;
extern int readlink_copy(char __user *, int, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern const char *page_follow_link_light(struct dentry *, void **);
+extern void page_put_link(struct inode *, void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
-extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern void kfree_put_link(struct inode *, void *);
+extern void free_page_put_link(struct inode *, void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2721,6 +2754,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
+const char *simple_follow_link(struct dentry *, void **);
+extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
@@ -2989,4 +3024,6 @@ static inline bool dir_relax(struct inode *inode)
return !IS_DEADDIR(inode);
}
+extern bool path_noexec(const struct path *path);
+
#endif /* _LINUX_FS_H */
diff --git a/kernel/include/linux/fscache-cache.h b/kernel/include/linux/fscache-cache.h
index 771484993..604e1526c 100644
--- a/kernel/include/linux/fscache-cache.h
+++ b/kernel/include/linux/fscache-cache.h
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
*/
typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
enum fscache_operation_state {
FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
@@ -109,6 +110,9 @@ struct fscache_operation {
* the op in a non-pool thread */
fscache_operation_processor_t processor;
+ /* Operation cancellation cleanup (optional) */
+ fscache_operation_cancel_t cancel;
+
/* operation releaser */
fscache_operation_release_t release;
};
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_op_complete(struct fscache_operation *, bool);
extern void fscache_put_operation(struct fscache_operation *);
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @op: The operation to initialise
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation. The caller must still set flags,
- * object and processor if needed.
- */
-static inline void fscache_operation_init(struct fscache_operation *op,
- fscache_operation_processor_t processor,
- fscache_operation_release_t release)
-{
- INIT_WORK(&op->work, fscache_op_work_func);
- atomic_set(&op->usage, 1);
- op->state = FSCACHE_OP_ST_INITIALISED;
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
- op->processor = processor;
- op->release = release;
- INIT_LIST_HEAD(&op->pend_link);
-}
+extern void fscache_operation_init(struct fscache_operation *,
+ fscache_operation_processor_t,
+ fscache_operation_cancel_t,
+ fscache_operation_release_t);
/*
* data read operation
*/
struct fscache_retrieval {
struct fscache_operation op;
+ struct fscache_cookie *cookie; /* The netfs cookie */
struct address_space *mapping; /* netfs pages */
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
@@ -371,6 +359,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
+#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object)
return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
}
-static inline bool fscache_object_is_active(struct fscache_object *object)
+static inline bool fscache_cache_is_broken(struct fscache_object *object)
{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+ return test_bit(FSCACHE_IOERROR, &object->cache->flags);
}
-static inline bool fscache_object_is_dead(struct fscache_object *object)
+static inline bool fscache_object_is_active(struct fscache_object *object)
{
- return fscache_object_is_dying(object) &&
- test_bit(FSCACHE_IOERROR, &object->cache->flags);
+ return fscache_object_is_available(object) &&
+ fscache_object_is_live(object) &&
+ !fscache_cache_is_broken(object);
}
/**
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
uint16_t datalen);
+extern void fscache_object_retrying_stale(struct fscache_object *object);
+
+enum fscache_why_object_killed {
+ FSCACHE_OBJECT_IS_STALE,
+ FSCACHE_OBJECT_NO_SPACE,
+ FSCACHE_OBJECT_WAS_RETIRED,
+ FSCACHE_OBJECT_WAS_CULLED,
+};
+extern void fscache_object_mark_killed(struct fscache_object *object,
+ enum fscache_why_object_killed why);
+
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/kernel/include/linux/fsl/guts.h b/kernel/include/linux/fsl/guts.h
new file mode 100644
index 000000000..84d971ff3
--- /dev/null
+++ b/kernel/include/linux/fsl/guts.h
@@ -0,0 +1,192 @@
+/**
+ * Freescale 85xx and 86xx Global Utilities register set
+ *
+ * Authors: Jeff Brown
+ * Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2004,2007,2012 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __FSL_GUTS_H__
+#define __FSL_GUTS_H__
+
+#include <linux/types.h>
+
+/**
+ * Global Utility Registers.
+ *
+ * Not all registers defined in this structure are available on all chips, so
+ * you are expected to know whether a given register actually exists on your
+ * chip before you access it.
+ *
+ * Also, some registers are similar on different chips but have slightly
+ * different names. In these cases, one name is chosen to avoid extraneous
+ * #ifdefs.
+ */
+struct ccsr_guts {
+ __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
+ __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
+ __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
+ __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
+ __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
+ __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */
+ u8 res018[0x20 - 0x18];
+ __be32 porcir; /* 0x.0020 - POR Configuration Information Register */
+ u8 res024[0x30 - 0x24];
+ __be32 gpiocr; /* 0x.0030 - GPIO Control Register */
+ u8 res034[0x40 - 0x34];
+ __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
+ u8 res044[0x50 - 0x44];
+ __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
+ u8 res054[0x60 - 0x54];
+ __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
+ __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */
+ __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
+ u8 res06c[0x70 - 0x6c];
+ __be32 devdisr; /* 0x.0070 - Device Disable Control */
+#define CCSR_GUTS_DEVDISR_TB1 0x00001000
+#define CCSR_GUTS_DEVDISR_TB0 0x00004000
+ __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
+ u8 res078[0x7c - 0x78];
+	__be32	pmjcr;		/* 0x.007c - Power Management Jog Control Register */
+ __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
+ __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */
+ __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */
+	__be32	pmcdr;		/* 0x.008c - Power Management Clock Disable Register */
+ __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
+ __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
+ __be32 ectrstcr; /* 0x.0098 - Exception reset control register */
+ __be32 autorstsr; /* 0x.009c - Automatic reset status register */
+ __be32 pvr; /* 0x.00a0 - Processor Version Register */
+ __be32 svr; /* 0x.00a4 - System Version Register */
+ u8 res0a8[0xb0 - 0xa8];
+ __be32 rstcr; /* 0x.00b0 - Reset Control Register */
+ u8 res0b4[0xc0 - 0xb4];
+ __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
+ Called 'elbcvselcr' on 86xx SOCs */
+ u8 res0c4[0x100 - 0xc4];
+ __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
+ There are 16 registers */
+ u8 res140[0x224 - 0x140];
+ __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
+ __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
+ u8 res22c[0x604 - 0x22c];
+ __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
+ u8 res608[0x800 - 0x608];
+ __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
+ u8 res804[0x900 - 0x804];
+ __be32 ircr; /* 0x.0900 - Infrared Control Register */
+ u8 res904[0x908 - 0x904];
+ __be32 dmacr; /* 0x.0908 - DMA Control Register */
+ u8 res90c[0x914 - 0x90c];
+ __be32 elbccr; /* 0x.0914 - eLBC Control Register */
+ u8 res918[0xb20 - 0x918];
+ __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
+ __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
+ __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
+ u8 resb2c[0xe00 - 0xb2c];
+ __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
+ u8 rese04[0xe10 - 0xe04];
+ __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
+ u8 rese14[0xe20 - 0xe14];
+ __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
+ __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */
+ u8 rese28[0xf04 - 0xe28];
+ __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
+	__be32	srds1cr1;	/* 0x.0f08 - SerDes1 Control Register 1 */
+ u8 resf0c[0xf2c - 0xf0c];
+ __be32 itcr; /* 0x.0f2c - Internal transaction control register */
+ u8 resf30[0xf40 - 0xf30];
+ __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
+	__be32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 1 */
+} __attribute__ ((packed));
+
+
+/* Alternate function signal multiplex control */
+#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
+
+#ifdef CONFIG_PPC_86xx
+
+#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
+#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */
+
+/*
+ * Set the DMACR register in the GUTS
+ *
+ * The DMACR register determines the source of initiated transfers for each
+ * channel on each DMA controller. Rather than have a bunch of repetitive
+ * macros for the bit patterns, we just have a function that calculates
+ * them.
+ *
+ * guts: Pointer to GUTS structure
+ * co: The DMA controller (0 or 1)
+ * ch: The channel on the DMA controller (0, 1, 2, or 3)
+ * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
+ */
+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
+ unsigned int co, unsigned int ch, unsigned int device)
+{
+ unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
+
+ clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift);
+}
+
+#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000
+#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */
+#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */
+#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */
+#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */
+#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */
+#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */
+#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */
+#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */
+#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */
+#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */
+#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */
+#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008
+#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004
+#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002
+#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001
+
+/*
+ * Set the DMA external control bits in the GUTS
+ *
+ * The DMA external control bits in the PMUXCR are only meaningful for
+ * channels 0 and 3. Any other channels are ignored.
+ *
+ * guts: Pointer to GUTS structure
+ * co: The DMA controller (0 or 1)
+ * ch: The channel on the DMA controller (0, 1, 2, or 3)
+ * value: the new value for the bit (0 or 1)
+ */
+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
+ unsigned int co, unsigned int ch, unsigned int value)
+{
+ if ((ch == 0) || (ch == 3)) {
+ unsigned int shift = 2 * (co + 1) - (ch & 1) - 1;
+
+ clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift);
+ }
+}
+
+#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000
+#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000
+#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000
+#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25
+#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000
+#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \
+ (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT)
+#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16
+#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000
+#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT)
+#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF
+#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK)
+
+#endif
+
+#endif
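For illustration, routing DMA controller 1, channel 0 to the SSI and driving the matching external-control bit could look like this on an 86xx part ('guts' is assumed to be an ioremap()ed ccsr_guts block):

	guts_set_dmacr(guts, 1, 0, CCSR_GUTS_DMACR_DEV_SSI);
	guts_set_pmuxcr_dma(guts, 1, 0, 1);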
diff --git a/kernel/include/linux/fsl_devices.h b/kernel/include/linux/fsl_devices.h
index a82296af4..f29129141 100644
--- a/kernel/include/linux/fsl_devices.h
+++ b/kernel/include/linux/fsl_devices.h
@@ -20,10 +20,6 @@
#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI
PHY CLK to become stable - 10ms*/
#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
-#define FSL_USB_VER_OLD 0
-#define FSL_USB_VER_1_6 1
-#define FSL_USB_VER_2_2 2
-#define FSL_USB_VER_2_4 3
#include <linux/types.h>
@@ -51,6 +47,15 @@
*
*/
+enum fsl_usb2_controller_ver {
+ FSL_USB_VER_NONE = -1,
+ FSL_USB_VER_OLD = 0,
+ FSL_USB_VER_1_6 = 1,
+ FSL_USB_VER_2_2 = 2,
+ FSL_USB_VER_2_4 = 3,
+ FSL_USB_VER_2_5 = 4,
+};
+
enum fsl_usb2_operating_modes {
FSL_USB2_MPH_HOST,
FSL_USB2_DR_HOST,
@@ -64,6 +69,7 @@ enum fsl_usb2_phy_modes {
FSL_USB2_PHY_UTMI,
FSL_USB2_PHY_UTMI_WIDE,
FSL_USB2_PHY_SERIAL,
+ FSL_USB2_PHY_UTMI_DUAL,
};
struct clk;
@@ -71,7 +77,7 @@ struct platform_device;
struct fsl_usb2_platform_data {
/* board specific information */
- int controller_ver;
+ enum fsl_usb2_controller_ver controller_ver;
enum fsl_usb2_operating_modes operating_mode;
enum fsl_usb2_phy_modes phy_mode;
unsigned int port_enables;
@@ -92,6 +98,9 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
+ unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_a005275:1;
+ unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;
diff --git a/kernel/include/linux/fsl_ifc.h b/kernel/include/linux/fsl_ifc.h
index bf0321eab..0023088b2 100644
--- a/kernel/include/linux/fsl_ifc.h
+++ b/kernel/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
u32 nand_stat;
wait_queue_head_t nand_wait;
+ bool little_endian;
};
extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+static inline u32 ifc_in32(void __iomem *addr)
+{
+ u32 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread32(addr);
+ else
+ val = ioread32be(addr);
+
+ return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+ u16 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread16(addr);
+ else
+ val = ioread16be(addr);
+
+ return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite16(val, addr);
+ else
+ iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+ iowrite8(val, addr);
+}
#endif /* __ASM_FSL_IFC_H */
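A sketch of the accessors in use; the cspr_cs[].cspr field and CSPR_V bit are assumed from the fsl_ifc_regs layout earlier in this header, and 'regs' would be the controller's ioremap()ed base:

	u32 cspr = ifc_in32(&regs->cspr_cs[0].cspr);	  /* endian-safe read */
	ifc_out32(cspr | CSPR_V, &regs->cspr_cs[0].cspr); /* set the valid bit */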
diff --git a/kernel/include/linux/fsnotify_backend.h b/kernel/include/linux/fsnotify_backend.h
index 0f313f93c..533c44085 100644
--- a/kernel/include/linux/fsnotify_backend.h
+++ b/kernel/include/linux/fsnotify_backend.h
@@ -84,8 +84,6 @@ struct fsnotify_fname;
 * Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
- * should_send_event - given a group, inode, and mask this function determines
- * if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
@@ -197,40 +195,49 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
- * a mark is simply an object attached to an in core inode which allows an
+ * A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
- * these are flushed when an inode is evicted from core and may be flushed
- * when the inode is modified (as seen by fsnotify_access). Some fsnotify users
- * (such as dnotify) will flush these when the open fd is closed and not at
- * inode eviction or modification.
+ * These are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access). Some fsnotify
+ * users (such as dnotify) will flush these when the open fd is closed and not
+ * at inode eviction or modification.
+ *
+ * Text in brackets is showing the lock(s) protecting modifications of a
+ * particular entry. obj_lock means either inode->i_lock or
+ * mnt->mnt_root->d_lock depending on the mark type.
*/
struct fsnotify_mark {
- __u32 mask; /* mask this mark is for */
- /* we hold ref for each i_list and g_list. also one ref for each 'thing'
+ /* Mask this mark is for [mark->lock, group->mark_mutex] */
+ __u32 mask;
+ /* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
- atomic_t refcnt; /* active things looking at this mark */
- struct fsnotify_group *group; /* group this mark is for */
- struct list_head g_list; /* list of marks by group->i_fsnotify_marks
- * Also reused for queueing mark into
- * destroy_list when it's waiting for
- * the end of SRCU period before it can
- * be freed */
- spinlock_t lock; /* protect group and inode */
- struct hlist_node obj_list; /* list of marks for inode / vfsmount */
- struct list_head free_list; /* tmp list used when freeing this mark */
- union {
+ atomic_t refcnt;
+ /* Group this mark is for. Set on mark creation, stable until last ref
+ * is dropped */
+ struct fsnotify_group *group;
+ /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+ * mark into destroy_list when it's waiting for the end of SRCU period
+ * before it can be freed. [group->mark_mutex] */
+ struct list_head g_list;
+ /* Protects inode / mnt pointers, flags, masks */
+ spinlock_t lock;
+ /* List of marks for inode / vfsmount [obj_lock] */
+ struct hlist_node obj_list;
+ union { /* Object pointer [mark->lock, group->mark_mutex] */
struct inode *inode; /* inode this mark is associated with */
struct vfsmount *mnt; /* vfsmount this mark is associated with */
};
- __u32 ignored_mask; /* events types to ignore */
+ /* Events types to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignored_mask;
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
- unsigned int flags; /* vfsmount or inode mark? */
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
+ unsigned int flags; /* flags [mark->lock] */
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
@@ -347,8 +354,10 @@ extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
-extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
- struct fsnotify_group *group);
+/* detach mark from inode / mount list, group list, drop inode reference */
+extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
+/* free mark */
+extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */
@@ -359,7 +368,7 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct list_head *list);
+extern void fsnotify_unmount_inodes(struct super_block *sb);
/* put here because inotify does some weird stuff when destroying watches */
extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -395,7 +404,7 @@ static inline u32 fsnotify_get_cookie(void)
return 0;
}
-static inline void fsnotify_unmount_inodes(struct list_head *list)
+static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
#endif /* CONFIG_FSNOTIFY */
diff --git a/kernel/include/linux/ftrace.h b/kernel/include/linux/ftrace.h
index 6cd8c0ee4..f2cd67624 100644
--- a/kernel/include/linux/ftrace.h
+++ b/kernel/include/linux/ftrace.h
@@ -263,7 +263,18 @@ static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
+
+#define STACK_TRACE_ENTRIES 500
+
+struct stack_trace;
+
+extern unsigned stack_trace_index[];
+extern struct stack_trace stack_trace_max;
+extern unsigned long stack_trace_max_size;
+extern arch_spinlock_t stack_trace_max_lock;
+
extern int stack_tracer_enabled;
+void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -575,6 +586,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
+extern void ftrace_release_mod(struct module *mod);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
@@ -682,6 +694,18 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+static inline unsigned long get_lock_parent_ip(void)
+{
+ unsigned long addr = CALLER_ADDR0;
+
+ if (!in_lock_functions(addr))
+ return addr;
+ addr = CALLER_ADDR1;
+ if (!in_lock_functions(addr))
+ return addr;
+ return CALLER_ADDR2;
+}
+
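A one-line sketch of the intended use (mirroring how a preempt-off tracepoint caller is assumed to invoke it), so the reported IP is the lock's caller rather than the locking primitive itself:

	trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());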
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
diff --git a/kernel/include/linux/fwnode.h b/kernel/include/linux/fwnode.h
index 0408545bc..851671742 100644
--- a/kernel/include/linux/fwnode.h
+++ b/kernel/include/linux/fwnode.h
@@ -16,7 +16,9 @@ enum fwnode_type {
FWNODE_INVALID = 0,
FWNODE_OF,
FWNODE_ACPI,
+ FWNODE_ACPI_DATA,
FWNODE_PDATA,
+ FWNODE_IRQCHIP,
};
struct fwnode_handle {
diff --git a/kernel/include/linux/genalloc.h b/kernel/include/linux/genalloc.h
index 1ccaab44a..7ff168d06 100644
--- a/kernel/include/linux/genalloc.h
+++ b/kernel/include/linux/genalloc.h
@@ -59,6 +59,8 @@ struct gen_pool {
genpool_algo_t algo; /* allocation function */
void *data;
+
+ const char *name;
};
/*
@@ -118,17 +120,17 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
- int min_alloc_order, int nid);
-extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+ int min_alloc_order, int nid, const char *name);
+extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size);
#ifdef CONFIG_OF
-extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+extern struct gen_pool *of_gen_pool_get(struct device_node *np,
const char *propname, int index);
#else
-static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
const char *propname, int index)
{
return NULL;
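Usage under the new names, as a sketch (the "sram" property name and the fallback policy are assumptions):

	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);

	if (!pool)	/* fall back to a pool registered on the device */
		pool = gen_pool_get(dev, NULL);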
diff --git a/kernel/include/linux/genetlink.h b/kernel/include/linux/genetlink.h
index 09460d6d6..a4c61cbce 100644
--- a/kernel/include/linux/genetlink.h
+++ b/kernel/include/linux/genetlink.h
@@ -8,7 +8,7 @@
extern void genl_lock(void);
extern void genl_unlock(void);
#ifdef CONFIG_LOCKDEP
-extern int lockdep_genl_is_held(void);
+extern bool lockdep_genl_is_held(void);
#endif
/* for synchronisation between af_netlink and genetlink */
diff --git a/kernel/include/linux/genhd.h b/kernel/include/linux/genhd.h
index ec274e0f4..847cc1d91 100644
--- a/kernel/include/linux/genhd.h
+++ b/kernel/include/linux/genhd.h
@@ -13,6 +13,7 @@
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
#ifdef CONFIG_BLOCK
@@ -124,7 +125,7 @@ struct hd_struct {
#else
struct disk_stats dkstats;
#endif
- atomic_t ref;
+ struct percpu_ref ref;
struct rcu_head rcu_head;
};
@@ -162,6 +163,18 @@ struct disk_part_tbl {
struct disk_events;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+struct blk_integrity {
+ struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+};
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
struct gendisk {
/* major, first_minor and minors are input parameters only,
* don't use directly. Use disk_devt() and disk_max_parts().
@@ -197,8 +210,8 @@ struct gendisk {
atomic_t sync_io; /* RAID */
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct blk_integrity *integrity;
-#endif
+ struct kobject integrity_kobj;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
int node_id;
};
@@ -611,7 +624,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
sector_t len, int flags,
struct partition_meta_info
*info);
-extern void __delete_partition(struct hd_struct *);
+extern void __delete_partition(struct percpu_ref *);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
@@ -640,27 +653,39 @@ extern ssize_t part_fail_store(struct device *dev,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline void hd_ref_init(struct hd_struct *part)
+static inline int hd_ref_init(struct hd_struct *part)
{
- atomic_set(&part->ref, 1);
- smp_mb();
+ if (percpu_ref_init(&part->ref, __delete_partition, 0,
+ GFP_KERNEL))
+ return -ENOMEM;
+ return 0;
}
static inline void hd_struct_get(struct hd_struct *part)
{
- atomic_inc(&part->ref);
- smp_mb__after_atomic();
+ percpu_ref_get(&part->ref);
}
static inline int hd_struct_try_get(struct hd_struct *part)
{
- return atomic_inc_not_zero(&part->ref);
+ return percpu_ref_tryget_live(&part->ref);
}
static inline void hd_struct_put(struct hd_struct *part)
{
- if (atomic_dec_and_test(&part->ref))
- __delete_partition(part);
+ percpu_ref_put(&part->ref);
+}
+
+static inline void hd_struct_kill(struct hd_struct *part)
+{
+ percpu_ref_kill(&part->ref);
+}
+
+static inline void hd_free_part(struct hd_struct *part)
+{
+ free_part_stats(part);
+ free_part_info(part);
+ percpu_ref_exit(&part->ref);
}
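With the switch to percpu_ref, partition teardown becomes asynchronous; a sketch of the assumed delete path:

	/* stop new hd_struct_try_get() lookups; percpu_ref_kill() drops the
	 * base reference, and __delete_partition() runs once the remaining
	 * hd_struct_put() calls drain */
	hd_struct_kill(part);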
/*
@@ -714,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
#endif
}
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+extern void blk_integrity_add(struct gendisk *);
+extern void blk_integrity_del(struct gendisk *);
+extern void blk_integrity_revalidate(struct gendisk *);
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline void blk_integrity_add(struct gendisk *disk) { }
+static inline void blk_integrity_del(struct gendisk *disk) { }
+static inline void blk_integrity_revalidate(struct gendisk *disk) { }
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#else /* CONFIG_BLOCK */
static inline void printk_all_partitions(void) { }
diff --git a/kernel/include/linux/gfp.h b/kernel/include/linux/gfp.h
index 15928f064..8942af081 100644
--- a/kernel/include/linux/gfp.h
+++ b/kernel/include/linux/gfp.h
@@ -14,7 +14,7 @@ struct vm_area_struct;
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
-#define ___GFP_WAIT 0x10u
+#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
@@ -29,18 +29,17 @@ struct vm_area_struct;
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
-#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_ATOMIC 0x80000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_NO_KSWAPD 0x400000u
+#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
+#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
- * GFP bitmasks..
- *
- * Zone modifiers (see linux/mmzone.h - low three bits)
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
@@ -50,113 +49,229 @@ struct vm_area_struct;
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/*
+ * Page mobility and placement hints
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * __GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * __GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * __GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+
+/*
+ * Watermark modifiers -- controls access to emergency reserves
+ *
+ * __GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
+ *
+ * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with __GFP_HIGH
+ *
+ * __GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users either should
+ * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+ *
+ * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the __GFP_MEMALLOC flag if both are set.
+ *
+ * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
+ */
+#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT)
+
/*
- * Action modifiers - doesn't change the zoning
+ * Reclaim modifiers
+ *
+ * __GFP_IO can start physical IO.
+ *
+ * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail. This depends upon the particular VM implementation.
+ * _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. New users should be evaluated carefully
- * (and the flag should be used only when there is no reasonable failure policy)
- * but it is definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure
+ * policy) but it is definitely preferable to use the flag rather than
+ * open-code an endless loop around the allocator.
*
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
- *
- * __GFP_MOVABLE: Flag that this page will be movable by the page migration
- * mechanism or reclaimed
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
+ * return NULL when direct reclaim and memory compaction have failed to allow
+ * the allocation to succeed. The OOM killer is not called with the current
+ * implementation.
*/
-#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
-#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
-#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
-#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
-#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
- * This takes precedence over the
- * __GFP_MEMALLOC flag if both are
- * set
- */
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
-#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
-#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
-
-#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
/*
- * This may seem redundant, but it's a way of annotating false positives vs.
- * allocations that simply cannot be supported (e.g. page tables).
+ * Action modifiers
+ *
+ * __GFP_COLD indicates that the caller does not expect the page to be used
+ * in the near future. Where possible, a cache-cold page will be returned.
+ *
+ * __GFP_NOWARN suppresses allocation failure reports.
+ *
+ * __GFP_COMP adds compound page metadata.
+ *
+ * __GFP_ZERO returns a zeroed page on success.
+ *
+ * __GFP_NOTRACK avoids tracking with kmemcheck.
+ *
+ * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
+ * distinguishing in the source between false positives and allocations that
+ * cannot be supported (e.g. page tables).
+ *
+ * __GFP_OTHER_NODE is for allocations that are on a remote node but that
+ * should not be accounted for as a remote allocation in vmstat. A
+ * typical user would be khugepaged collapsing a huge page on a remote
+ * node.
*/
+#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-/* This equals 0, but use constants in case they ever change */
-#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
-/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
-#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_NOIO (__GFP_WAIT)
-#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+/*
+ * Commonly used GFP flag combinations. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * __GFP_FOO flags as necessary.
+ *
+ * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ *
+ * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * ZONE_NORMAL or a lower zone for direct access but can enter direct reclaim.
+ *
+ * GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ *
+ * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ *
+ * GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
+ * address.
+ *
+ * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
+ *
+ * GFP_TRANSHUGE is used for THP allocations. They are compound allocations
+ * that will fail quickly if memory is not available and will not wake
+ * kswapd on failure.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
-#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_IOFS (__GFP_IO | __GFP_FS)
-#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
- __GFP_NO_KSWAPD)
+#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
+ ~__GFP_KSWAPD_RECLAIM)
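
A minimal sketch of the pattern recommended above: start from a standard
combination and clear individual bits. The helper and its name are
illustrative, not part of this header.

	#include <linux/slab.h>

	/* GFP_KERNEL minus __GFP_FS: the allocation cannot recurse into
	 * the filesystem, e.g. on a path already holding fs locks. */
	static void *fs_safe_alloc(size_t size)
	{
		return kmalloc(size, GFP_KERNEL & ~__GFP_FS);
	}
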
-/* This mask makes up all the page movable related flags */
+/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+#define GFP_MOVABLE_SHIFT 3
-/* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
- __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
- __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
-
-/* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
-
-/* Control allocation constraints */
-#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
-
-/* Do not use these with a slab allocator */
-#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
-
-/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
- platforms, used as appropriate on others */
-
-#define GFP_DMA __GFP_DMA
-
-/* 4GB DMA on some platforms */
-#define GFP_DMA32 __GFP_DMA32
-
-/* Convert GFP flags to their corresponding migrate type */
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
- WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+ VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+ BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
+ BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
- ((gfp_flags & __GFP_RECLAIMABLE) != 0);
+ return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+}
+#undef GFP_MOVABLE_MASK
+#undef GFP_MOVABLE_SHIFT
+
+static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
+{
+ return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
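
A hedged sketch of how a caller might use the new predicate; the helper
below is hypothetical, not part of this header.

	#include <linux/gfp.h>
	#include <linux/mutex.h>

	/* Block on the mutex only when the gfp mask permits direct reclaim
	 * (and therefore sleeping); otherwise fall back to a trylock. */
	static bool grab_cache_lock(struct mutex *lock, gfp_t gfp)
	{
		if (gfpflags_allow_blocking(gfp)) {
			mutex_lock(lock);
			return true;
		}
		return mutex_trylock(lock);
	}
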
#ifdef CONFIG_HIGHMEM
@@ -300,22 +415,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order)
+/*
+ * Allocate pages, preferring the node given as nid. The node must be valid and
+ * online. For a more general interface, see alloc_pages_node().
+ */
+static inline struct page *
+__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
- /* Unknown node is current node */
- if (nid < 0)
- nid = numa_node_id();
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ VM_WARN_ON(!node_online(nid));
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
-static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+/*
+ * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
+ * prefer the current CPU's closest node. Otherwise the node must be valid
+ * and online.
+ */
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
- VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
- return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+ return __alloc_pages_node(nid, gfp_mask, order);
}
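
The division of labour between the two helpers in a short sketch; the
wrapper is illustrative only.

	/* alloc_pages_node() accepts NUMA_NO_NODE for "no preference";
	 * __alloc_pages_node() requires a valid, online node. */
	static struct page *demo_alloc(int nid)
	{
		if (nid == NUMA_NO_NODE)
			return alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
		return __alloc_pages_node(nid, GFP_KERNEL, 0);
	}
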
#ifdef CONFIG_NUMA
@@ -354,7 +478,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
-/* This is different from alloc_pages_exact_node !!! */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
@@ -368,6 +491,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);
@@ -379,6 +507,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void page_alloc_init_late(void);
+#else
+static inline void page_alloc_init_late(void)
+{
+}
+#endif
+
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
* GFP flags are used before interrupts are enabled. Once interrupts are
diff --git a/kernel/include/linux/goldfish.h b/kernel/include/linux/goldfish.h
index 569236e6b..93e080b39 100644
--- a/kernel/include/linux/goldfish.h
+++ b/kernel/include/linux/goldfish.h
@@ -3,13 +3,24 @@
/* Helpers for Goldfish virtual platform */
-static inline void gf_write64(unsigned long data,
- void __iomem *portl, void __iomem *porth)
+static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
+ void __iomem *porth)
{
- writel((u32)data, portl);
+ writel((u32)(unsigned long)ptr, portl);
#ifdef CONFIG_64BIT
- writel(data>>32, porth);
+ writel((unsigned long)ptr >> 32, porth);
#endif
}
+static inline void gf_write_dma_addr(const dma_addr_t addr,
+ void __iomem *portl,
+ void __iomem *porth)
+{
+ writel((u32)addr, portl);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(addr >> 32, porth);
+#endif
+}
+
+
#endif /* __LINUX_GOLDFISH_H */
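
A hedged usage sketch for the two helpers; the register offsets and the
push_command() wrapper are made up for illustration.

	#include <linux/goldfish.h>
	#include <linux/io.h>

	/* Publish a command buffer to the device through lo/hi register
	 * pairs (offsets 0x00/0x04 and 0x08/0x0c are hypothetical). */
	static void push_command(void __iomem *base, void *cmd, dma_addr_t dma)
	{
		gf_write_ptr(cmd, base + 0x00, base + 0x04);
		gf_write_dma_addr(dma, base + 0x08, base + 0x0c);
	}
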
diff --git a/kernel/include/linux/gpio.h b/kernel/include/linux/gpio.h
index ab81339a8..d12b5d566 100644
--- a/kernel/include/linux/gpio.h
+++ b/kernel/include/linux/gpio.h
@@ -196,13 +196,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- /* GPIO can never have been requested */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline void gpio_unexport(unsigned gpio)
{
/* GPIO can never have been exported */
diff --git a/kernel/include/linux/gpio/consumer.h b/kernel/include/linux/gpio/consumer.h
index da042657d..fb0fde686 100644
--- a/kernel/include/linux/gpio/consumer.h
+++ b/kernel/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@ enum gpiod_flags {
int gpiod_count(struct device *dev, const char *con_id);
/* Acquire and dispose GPIOs */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
@@ -70,18 +70,18 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
@@ -100,24 +100,25 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
@@ -145,31 +146,31 @@ static inline int gpiod_count(struct device *dev, const char *con_id)
return 0;
}
-static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
- const char *con_id,
- enum gpiod_flags flags)
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags flags)
+gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_optional(struct device *dev, const char *con_id,
- enum gpiod_flags flags)
+gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index, enum gpiod_flags flags)
+gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
@@ -205,7 +206,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get(struct device *dev,
+devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
@@ -213,7 +214,7 @@ __devm_gpiod_get(struct device *dev,
}
static inline
struct gpio_desc *__must_check
-__devm_gpiod_get_index(struct device *dev,
+devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
@@ -222,14 +223,14 @@ __devm_gpiod_get_index(struct device *dev,
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
@@ -304,9 +305,9 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -322,9 +323,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -341,7 +342,7 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -360,7 +361,7 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -399,6 +400,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return ERR_PTR(-EINVAL);
}
+
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -423,48 +425,11 @@ static inline struct gpio_desc *devm_get_gpiod_from_child(
#endif /* CONFIG_GPIOLIB */
-/*
- * Vararg-hacks! This is done to transition the kernel to always pass
- * the options flags argument to the below functions. During a transition
- * phase these vararg macros make both old-and-newstyle code compile,
- * but when all calls to the elder API are removed, these should go away
- * and the __gpiod_get() etc functions above be renamed just gpiod_get()
- * etc.
- */
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
-#define __gpiod_get_index(dev, con_id, index, flags, ...) \
- __gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
-#define __gpiod_get_optional(dev, con_id, flags, ...) \
- __gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get(dev, con_id, flags, ...) \
- __devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) \
- __devm_gpiod_get_index(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
- __devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
-
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
-int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
@@ -481,11 +446,6 @@ static inline int gpiod_export_link(struct device *dev, const char *name,
return -ENOSYS;
}
-static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
-{
- return -ENOSYS;
-}
-
static inline void gpiod_unexport(struct gpio_desc *desc)
{
}
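
A minimal consumer sketch against the renamed API; the "reset" con_id and
demo_probe() are illustrative. Note the flags argument is now mandatory.

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int demo_probe(struct device *dev)
	{
		struct gpio_desc *reset;

		/* GPIOD_OUT_LOW requests the line as an output driven
		 * inactive at request time. */
		reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		gpiod_set_value(reset, 1);	/* assert reset */
		gpiod_set_value(reset, 0);	/* release it */
		return 0;
	}
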
diff --git a/kernel/include/linux/gpio/driver.h b/kernel/include/linux/gpio/driver.h
index f1b36593e..d1baebf35 100644
--- a/kernel/include/linux/gpio/driver.h
+++ b/kernel/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
struct device;
@@ -20,6 +21,7 @@ struct seq_file;
* struct gpio_chip - abstract a GPIO controller
* @label: for diagnostics
* @dev: optional device providing the GPIOs
+ * @cdev: class device used by sysfs interface (may be NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @list: links gpio_chips together for traversal
* @request: optional hook for chip-specific activation, such as
@@ -41,8 +43,12 @@ struct seq_file;
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
- * @base: identifies the first GPIO number handled by this chip; or, if
- * negative during registration, requests dynamic ID allocation.
+ * @base: identifies the first GPIO number handled by this chip;
+ * or, if negative during registration, requests dynamic ID allocation.
+ * DEPRECATION: providing anything non-negative and nailing the base
+ * offset of GPIO chips is deprecated. Please pass -1 as base to
+ * let gpiolib select the chip base in all possible cases. We want to
+ * get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
* @desc: array of ngpio descriptors. Private.
@@ -57,9 +63,19 @@ struct seq_file;
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @exported: flags if the gpiochip is exported for use from sysfs. Private.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
+ * @irqchip: GPIO IRQ chip implementation, provided by GPIO driver
+ * @irqdomain: Interrupt translation domain; responsible for mapping
+ * between GPIO hwirq number and linux irq number
+ * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
+ * @irq_handler: the irq handler to use (often a predefined irq core function)
+ * for GPIO IRQs, provided by GPIO driver
+ * @irq_default_type: default IRQ triggering type applied during GPIO driver
+ * initialization, provided by GPIO driver
+ * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
+ * provided by GPIO driver
+ * @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
 * they can all be accessed through a common programming interface.
@@ -74,6 +90,7 @@ struct seq_file;
struct gpio_chip {
const char *label;
struct device *dev;
+ struct device *cdev;
struct module *owner;
struct list_head list;
@@ -109,7 +126,6 @@ struct gpio_chip {
const char *const *names;
bool can_sleep;
bool irq_not_threaded;
- bool exported;
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
@@ -121,6 +137,8 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
+ int irq_parent;
+ struct lock_class_key *lock_key;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -166,14 +184,31 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler);
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type);
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_irqchip_add(...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
+ }) \
+)
+#else
+#define gpiochip_irqchip_add(...) \
+ _gpiochip_irqchip_add(__VA_ARGS__, NULL)
+#endif
#endif /* CONFIG_GPIOLIB_IRQCHIP */
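
Call sites keep the old spelling; under CONFIG_LOCKDEP the macro attaches a
static lock class per call site. A hedged driver-side sketch, with my_chip
and my_irqchip as hypothetical names:

	int ret;

	/* Attach an irqchip to a gpiochip from a driver's probe path. */
	ret = gpiochip_irqchip_add(&my_chip, &my_irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;
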
+int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
+void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
+
#ifdef CONFIG_PINCTRL
/**
diff --git a/kernel/include/linux/gpio/machine.h b/kernel/include/linux/gpio/machine.h
index e2706140e..c0d712d22 100644
--- a/kernel/include/linux/gpio/machine.h
+++ b/kernel/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
}
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#endif /* __LINUX_GPIO_MACHINE_H */
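
With the new removal hook a board file can unregister its table
symmetrically; a sketch, with hypothetical device and chip names.

	#include <linux/gpio/machine.h>

	/* Map the "reset" con_id of device "foo.0" to line 42 of the chip
	 * labelled "gpiochip0", active low. */
	static struct gpiod_lookup_table foo_gpios = {
		.dev_id = "foo.0",
		.table = {
			GPIO_LOOKUP("gpiochip0", 42, "reset", GPIO_ACTIVE_LOW),
			{ },
		},
	};

	gpiod_add_lookup_table(&foo_gpios);	/* at board init */
	gpiod_remove_lookup_table(&foo_gpios);	/* at teardown; new here */
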
diff --git a/kernel/include/linux/gsmmux.h b/kernel/include/linux/gsmmux.h
deleted file mode 100644
index c25e9477f..000000000
--- a/kernel/include/linux/gsmmux.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _LINUX_GSMMUX_H
-#define _LINUX_GSMMUX_H
-
-struct gsm_config
-{
- unsigned int adaption;
- unsigned int encapsulation;
- unsigned int initiator;
- unsigned int t1;
- unsigned int t2;
- unsigned int t3;
- unsigned int n2;
- unsigned int mru;
- unsigned int mtu;
- unsigned int k;
- unsigned int i;
- unsigned int unused[8]; /* Padding for expansion without
- breaking stuff */
-};
-
-#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
-#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
-
-struct gsm_netconfig {
- unsigned int adaption; /* Adaption to use in network mode */
- unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
- unsigned short unused2;
- char if_name[IFNAMSIZ]; /* interface name format string */
- __u8 unused[28]; /* For future use */
-};
-
-#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
-#define GSMIOC_DISABLE_NET _IO('G', 3)
-
-
-#endif
diff --git a/kernel/include/linux/hardirq.h b/kernel/include/linux/hardirq.h
index f4af03404..dfd59d6bc 100644
--- a/kernel/include/linux/hardirq.h
+++ b/kernel/include/linux/hardirq.h
@@ -1,7 +1,7 @@
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
diff --git a/kernel/include/linux/hid.h b/kernel/include/linux/hid.h
index 176b43670..251a1d382 100644
--- a/kernel/include/linux/hid.h
+++ b/kernel/include/linux/hid.h
@@ -698,8 +698,8 @@ struct hid_driver {
int (*input_mapped)(struct hid_device *hdev,
struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max);
- void (*input_configured)(struct hid_device *hdev,
- struct hid_input *hidinput);
+ int (*input_configured)(struct hid_device *hdev,
+ struct hid_input *hidinput);
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
@@ -815,6 +815,8 @@ void hid_disconnect(struct hid_device *hid);
const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id);
s32 hid_snto32(__u32 value, unsigned n);
+__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n);
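
A hedged one-liner showing the intent of the new helper; hdev, report_data
and the bit layout are illustrative.

	/* Pull a 12-bit field starting at bit offset 8 out of a raw
	 * report buffer. */
	__u32 x = hid_field_extract(hdev, report_data, 8, 12);
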
/**
* hid_device_io_start - enable HID input during probe, remove
diff --git a/kernel/include/linux/highmem.h b/kernel/include/linux/highmem.h
index 06bae5a67..a117a33ef 100644
--- a/kernel/include/linux/highmem.h
+++ b/kernel/include/linux/highmem.h
@@ -79,7 +79,6 @@ static inline void __kunmap_atomic(void *addr)
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
#endif
diff --git a/kernel/include/linux/hrtimer.h b/kernel/include/linux/hrtimer.h
index 64e1abb37..8fbcdfa5d 100644
--- a/kernel/include/linux/hrtimer.h
+++ b/kernel/include/linux/hrtimer.h
@@ -53,34 +53,25 @@ enum hrtimer_restart {
*
* 0x00 inactive
* 0x01 enqueued into rbtree
- * 0x02 callback function running
- * 0x04 timer is migrated to another cpu
*
- * Special cases:
- * 0x03 callback function running and enqueued
- * (was requeued on another CPU)
- * 0x05 timer was migrated on CPU hotunplug
+ * The callback state is not part of the timer->state because clearing it would
+ * mean touching the timer after the callback; this would make it impossible to
+ * free the timer from the callback function.
*
- * The "callback function running and enqueued" status is only possible on
- * SMP. It happens for example when a posix timer expired and the callback
+ * Therefore we track the callback state in:
+ *
+ * timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
* and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer. We have to preserve the callback running state,
- * as otherwise the timer could be removed before the softirq code finishes the
- * the handling of the timer.
- *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
- * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
- * also affects HRTIMER_STATE_MIGRATE where the preservation is not
- * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
- * enqueued on the new cpu.
+ * signal and rearm the timer.
*
* All state transitions are protected by cpu_base->lock.
*/
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
-#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_MIGRATE 0x04
/**
* struct hrtimer - the basic hrtimer structure
@@ -96,7 +87,11 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @start_pid: timer statistics field to store the pid of the task which
+ * @cb_entry: list entry to defer timers from hardirq context
+ * @irqsafe: timer can run in hardirq context
+ * @praecox: timer expiry time if expired at the time of programming
+ * @is_rel: Set if the timer was armed relative
+ * @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started
@@ -110,12 +105,13 @@ struct hrtimer {
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
- unsigned long state;
+ u8 state;
struct list_head cb_entry;
int irqsafe;
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
ktime_t praecox;
#endif
+ u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -135,6 +131,8 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
+# define HRTIMER_CLOCK_BASE_ALIGN 64
+
/**
* struct hrtimer_clock_base - the timer base for a specific clock
* @cpu_base: per cpu clock base
@@ -142,9 +140,8 @@ struct hrtimer_sleeper {
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @active: red black tree root node for the active timers
- * @resolution: the resolution of the clock, in nanoseconds
+ * @expired: list head for deferred timers.
* @get_time: function to retrieve the current time of the clock
- * @softirq_time: the time when running the hrtimer queue in the softirq
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -153,11 +150,9 @@ struct hrtimer_clock_base {
clockid_t clockid;
struct timerqueue_head active;
struct list_head expired;
- ktime_t resolution;
ktime_t (*get_time)(void);
- ktime_t softirq_time;
ktime_t offset;
-};
+} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
@@ -171,11 +166,16 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set: Indicates that clock was set from irq context.
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
+ * @next_timer: Pointer to the first expiring timer
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @hang_detected: The last hrtimer interrupt detected a hang
@@ -184,30 +184,42 @@ enum hrtimer_base_type {
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
+ seqcount_t seq;
+ struct hrtimer *running;
+ struct hrtimer *running_soft;
unsigned int cpu;
unsigned int active_bases;
- unsigned int clock_was_set;
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hres_active : 1,
+ hang_detected : 1;
ktime_t expires_next;
- int in_hrtirq;
- int hres_active;
- int hang_detected;
- unsigned long nr_events;
- unsigned long nr_retries;
- unsigned long nr_hangs;
- ktime_t max_hang_time;
+ struct hrtimer *next_timer;
+ unsigned int nr_events;
+ unsigned int nr_retries;
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
wait_queue_head_t wait;
#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
+} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
+ BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -271,19 +283,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
return ktime_sub(timer->node.expires, timer->base->get_time());
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
-/*
- * In high resolution mode the time reference must be read accurate
- */
static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->get_time();
}
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return timer->base->cpu_base->hres_active;
@@ -304,21 +313,16 @@ extern void hrtimer_peek_ahead_timers(void);
extern void clock_was_set_delayed(void);
+extern unsigned int hrtimer_resolution;
+
#else
# define MONOTONIC_RES_NSEC LOW_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-static inline void hrtimer_peek_ahead_timers(void) { }
+#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
- return timer->base->softirq_time;
-}
+static inline void hrtimer_peek_ahead_timers(void) { }
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
@@ -329,6 +333,27 @@ static inline void clock_was_set_delayed(void) { }
#endif
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+ ktime_t rem = ktime_sub(timer->node.expires, now);
+
+ /*
+ * Adjust relative timers for the extra we added in
+ * hrtimer_start_range_ns() to prevent short timeouts.
+ */
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+ rem.tv64 -= hrtimer_resolution;
+ return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+ return __hrtimer_expires_remaining_adjusted(timer,
+ timer->base->get_time());
+}
+
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
@@ -362,32 +387,39 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
/* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- unsigned long delta_ns,
- const enum hrtimer_mode mode, int wakeup);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL)
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ hrtimer_start_range_ns(timer, tim, 0, mode);
+}
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
-static inline int hrtimer_start_expires(struct hrtimer *timer,
- enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
unsigned long delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
- return hrtimer_start_range_ns(timer, soft, delta, mode);
+ hrtimer_start_range_ns(timer, soft, delta, mode);
}
-static inline int hrtimer_restart(struct hrtimer *timer)
+static inline void hrtimer_restart(struct hrtimer *timer)
{
- return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Softirq preemption could deadlock timer removal */
@@ -398,21 +430,17 @@ static inline int hrtimer_restart(struct hrtimer *timer)
#endif
/* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-
-extern ktime_t hrtimer_get_next_event(void);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-/*
- * A timer is active, when it is enqueued into the rbtree or the
- * callback function is running or it's in the state of being migrated
- * to another cpu.
- */
-static inline int hrtimer_active(const struct hrtimer *timer)
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
- return timer->state != HRTIMER_STATE_INACTIVE;
+ return __hrtimer_get_remaining(timer, false);
}
+extern u64 hrtimer_get_next_event(void);
+
+extern bool hrtimer_active(const struct hrtimer *timer);
+
/*
* Helper function to check, whether the timer is on one of the queues
*/
@@ -425,16 +453,31 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
* Helper function to check, whether the timer is running the callback
* function
*/
-static inline int hrtimer_callback_running(struct hrtimer *timer)
+static inline int hrtimer_callback_running(const struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_CALLBACK;
+ return timer->base->cpu_base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * @timer: hrtimer to forward
+ * @interval: the interval to forward
+ *
+ * Forward the timer expiry so it will expire after the current time
+ * of the hrtimer clock base. Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
+ */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
@@ -459,7 +502,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
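
A sketch tying the reshaped API together; demo_tick() and demo_start() are
hypothetical. hrtimer_start() is now void, and a periodic callback re-arms
itself with hrtimer_forward_now().

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static enum hrtimer_restart demo_tick(struct hrtimer *t)
	{
		/* Push the expiry one period past now; returning
		 * HRTIMER_RESTART asks the core to requeue the timer. */
		hrtimer_forward_now(t, ms_to_ktime(100));
		return HRTIMER_RESTART;
	}

	static void demo_start(struct hrtimer *t)
	{
		hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		t->function = demo_tick;
		hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL);
	}
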
diff --git a/kernel/include/linux/htirq.h b/kernel/include/linux/htirq.h
index 70a1dbbf2..d4a527e58 100644
--- a/kernel/include/linux/htirq.h
+++ b/kernel/include/linux/htirq.h
@@ -1,24 +1,38 @@
#ifndef LINUX_HTIRQ_H
#define LINUX_HTIRQ_H
+struct pci_dev;
+struct irq_data;
+
struct ht_irq_msg {
u32 address_lo; /* low 32 bits of the ht irq message */
u32 address_hi; /* high 32 bits of the ht irq message */
};
+typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+ struct ht_irq_msg *msg);
+
+struct ht_irq_cfg {
+ struct pci_dev *dev;
+ /* Update callback used to cope with buggy hardware */
+ ht_irq_update_t *update;
+ unsigned pos;
+ unsigned idx;
+ struct ht_irq_msg msg;
+};
+
/* Helper functions.. */
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-struct irq_data;
void mask_ht_irq(struct irq_data *data);
void unmask_ht_irq(struct irq_data *data);
/* The arch hook for getting things started */
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
+int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
+ ht_irq_update_t *update);
+void arch_teardown_ht_irq(unsigned int irq);
/* For drivers of buggy hardware */
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
#endif /* LINUX_HTIRQ_H */
diff --git a/kernel/include/linux/huge_mm.h b/kernel/include/linux/huge_mm.h
index f10b20f05..ecb080d6f 100644
--- a/kernel/include/linux/huge_mm.h
+++ b/kernel/include/linux/huge_mm.h
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
+int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned long pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
-extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
@@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
else
return 0;
}
-static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next)
-{
- if (!vma->anon_vma || vma->vm_ops)
- return;
- __vma_adjust_trans_huge(vma, start, end, adjust_next);
-}
static inline int hpage_nr_pages(struct page *page)
{
if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page)
return ACCESS_ONCE(huge_zero_page) == page;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return is_huge_zero_page(pmd_page(pmd));
+}
+
+struct page *get_huge_zero_page(void);
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/kernel/include/linux/hugetlb.h b/kernel/include/linux/hugetlb.h
index 205026175..685c262e0 100644
--- a/kernel/include/linux/hugetlb.h
+++ b/kernel/include/linux/hugetlb.h
@@ -35,6 +35,9 @@ struct resv_map {
struct kref refs;
spinlock_t lock;
struct list_head regions;
+ long adds_in_progress;
+ struct list_head region_cache;
+ long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
-void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
+long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+ long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
+void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+extern struct mutex *hugetlb_fault_mutex_table;
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct address_space *mapping,
+ pgoff_t idx, unsigned long address);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@ struct huge_bootmem_page {
#endif
};
+struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+ pgoff_t idx);
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -460,18 +474,29 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
-static inline bool hugepages_supported(void)
+#ifndef hugepages_supported
+/*
+ * Some platforms decide whether they support huge pages at boot
+ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
+ * when there is no such support.
+ */
+#define hugepages_supported() (HPAGE_SHIFT != 0)
+#endif
+
+void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+
+static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
- /*
- * Some platform decide whether they support huge pages at boot
- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
- * there is no such support
- */
- return HPAGE_SHIFT != 0;
+ atomic_long_add(l, &mm->hugetlb_usage);
}
+static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+{
+ atomic_long_sub(l, &mm->hugetlb_usage);
+}
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
+#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
@@ -505,6 +530,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
{
return &mm->page_table_lock;
}
+
+static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
+{
+}
+
+static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
diff --git a/kernel/include/linux/hugetlb_cgroup.h b/kernel/include/linux/hugetlb_cgroup.h
index bcc853ecc..24154c26d 100644
--- a/kernel/include/linux/hugetlb_cgroup.h
+++ b/kernel/include/linux/hugetlb_cgroup.h
@@ -32,7 +32,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
- return (struct hugetlb_cgroup *)page[2].lru.next;
+ return (struct hugetlb_cgroup *)page[2].private;
}
static inline
@@ -42,15 +42,13 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return -1;
- page[2].lru.next = (void *)h_cg;
+ page[2].private = (unsigned long)h_cg;
return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
{
- if (hugetlb_cgrp_subsys.disabled)
- return true;
- return false;
+ return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
diff --git a/kernel/include/linux/hwspinlock.h b/kernel/include/linux/hwspinlock.h
index 3343298e4..859d673d9 100644
--- a/kernel/include/linux/hwspinlock.h
+++ b/kernel/include/linux/hwspinlock.h
@@ -26,6 +26,7 @@
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
struct device;
+struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;
@@ -66,6 +67,7 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
+int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
@@ -120,6 +122,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+ return 0;
+}
+
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
return 0;
diff --git a/kernel/include/linux/hyperv.h b/kernel/include/linux/hyperv.h
index 902c37aef..ae6a711dc 100644
--- a/kernel/include/linux/hyperv.h
+++ b/kernel/include/linux/hyperv.h
@@ -26,6 +26,7 @@
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
+#include <uapi/asm/hyperv.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
@@ -160,16 +161,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
* 1 . 1 (Windows 7)
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
+ * 4 . 0 (Windows 10)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
+#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_INVAL -1
-#define VERSION_CURRENT VERSION_WIN8_1
+#define VERSION_CURRENT VERSION_WIN10
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -389,10 +392,7 @@ enum vmbus_channel_message_type {
CHANNELMSG_INITIATE_CONTACT = 14,
CHANNELMSG_VERSION_RESPONSE = 15,
CHANNELMSG_UNLOAD = 16,
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
- CHANNELMSG_VIEWRANGE_ADD = 17,
- CHANNELMSG_VIEWRANGE_REMOVE = 18,
-#endif
+ CHANNELMSG_UNLOAD_RESPONSE = 17,
CHANNELMSG_COUNT
};
@@ -549,21 +549,6 @@ struct vmbus_channel_gpadl_torndown {
u32 gpadl;
} __packed;
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
-struct vmbus_channel_view_range_add {
- struct vmbus_channel_message_header header;
- PHYSICAL_ADDRESS viewrange_base;
- u64 viewrange_length;
- u32 child_relid;
-} __packed;
-
-struct vmbus_channel_view_range_remove {
- struct vmbus_channel_message_header header;
- PHYSICAL_ADDRESS viewrange_base;
- u32 child_relid;
-} __packed;
-#endif
-
struct vmbus_channel_relid_released {
struct vmbus_channel_message_header header;
u32 child_relid;
@@ -645,6 +630,11 @@ struct hv_input_signal_event_buffer {
struct hv_input_signal_event event;
};
+enum hv_signal_policy {
+ HV_SIGNAL_POLICY_DEFAULT = 0,
+ HV_SIGNAL_POLICY_EXPLICIT,
+};
+
struct vmbus_channel {
/* Unique channel id */
int id;
@@ -713,6 +703,11 @@ struct vmbus_channel {
/* The corresponding CPUID in the guest */
u32 target_cpu;
/*
+ * State to manage the CPU affiliation of channels.
+ */
+ struct cpumask alloced_cpus_in_node;
+ int numa_node;
+ /*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
@@ -745,6 +740,15 @@ struct vmbus_channel {
*/
struct list_head sc_list;
/*
+ * Current number of sub-channels.
+ */
+ int num_sc;
+ /*
+ * Index of the sub-channel (its position within sc_list) that should
+ * be used as the next outgoing channel.
+ */
+ int next_oc;
+ /*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
*/
@@ -758,11 +762,21 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
-
- int num_sc;
- int next_oc;
+ /*
+ * Host signaling policy: The default policy will be
+ * based on the ring buffer state. We will also support
+ * a policy where the client driver can have explicit
+ * signaling control.
+ */
+ enum hv_signal_policy signal_policy;
};
+static inline void set_channel_signal_state(struct vmbus_channel *c,
+ enum hv_signal_policy policy)
+{
+ c->signal_policy = policy;
+}
+
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
c->batched_reading = state;
@@ -982,6 +996,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ resource_size_t min, resource_size_t max,
+ resource_size_t size, resource_size_t align,
+ bool fb_overlap_ok);
+
/**
* VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
*
@@ -1236,17 +1255,8 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);
-int hv_kvp_init(struct hv_util_service *);
-void hv_kvp_deinit(void);
-void hv_kvp_onchannelcallback(void *);
-
-int hv_vss_init(struct hv_util_service *);
-void hv_vss_deinit(void);
-void hv_vss_onchannelcallback(void *);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
-extern struct resource hyperv_mmio;
-
/*
* Negotiated version with the Host.
*/
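
set_channel_signal_state() is the only knob this header adds for the new signaling policy. A sketch of how a high-throughput VMBus driver might opt into explicit control in its channel-open path (the callback name is illustrative, not from the source):

    #include <linux/hyperv.h>

    static void example_open_channel(struct vmbus_channel *chan)
    {
            /* Override the default ring-buffer heuristic; from here on
             * the driver decides when the host gets signaled. */
            set_channel_signal_state(chan, HV_SIGNAL_POLICY_EXPLICIT);
    }
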
diff --git a/kernel/include/linux/i2c-ocores.h b/kernel/include/linux/i2c-ocores.h
index 1c06b5c7c..01edd96fe 100644
--- a/kernel/include/linux/i2c-ocores.h
+++ b/kernel/include/linux/i2c-ocores.h
@@ -15,6 +15,7 @@ struct ocores_i2c_platform_data {
u32 reg_shift; /* register offset shift value */
u32 reg_io_width; /* register io read/write width */
u32 clock_khz; /* input clock in kHz */
+ bool big_endian; /* registers are big endian */
u8 num_devices; /* number of devices in the devices list */
struct i2c_board_info const *devices; /* devices connected to the bus */
};
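
With the new big_endian flag the platform data can describe a big-endian register file directly instead of the driver guessing. A sketch of board code, with made-up clock and layout values:

    #include <linux/i2c-ocores.h>

    static struct ocores_i2c_platform_data example_ocores_pdata = {
            .reg_shift    = 2,      /* registers on 32-bit boundaries */
            .reg_io_width = 4,      /* 32-bit register accesses */
            .clock_khz    = 50000,  /* 50 MHz input clock */
            .big_endian   = true,   /* registers are big endian */
    };
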
diff --git a/kernel/include/linux/i2c.h b/kernel/include/linux/i2c.h
index e83a738a3..768063baa 100644
--- a/kernel/include/linux/i2c.h
+++ b/kernel/include/linux/i2c.h
@@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
+extern s32
+i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
#endif /* I2C */
/**
@@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
/*flags for the client struct: */
-#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
-#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
+#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
+#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
-#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
-#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
/* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
/* must call put_device() when done with returned i2c_adapter device */
extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
{
return NULL;
}
+
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return NULL;
+}
#endif /* CONFIG_OF */
#endif /* _LINUX_I2C_H */
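
The comment added above of_get_i2c_adapter_by_node() carries the contract: unlike the of_find_ variant, the reference it returns must be dropped with i2c_put_adapter(). A sketch of a consumer honoring that pairing (error handling trimmed; returning -EPROBE_DEFER on a missing adapter is a common but not mandated choice):

    #include <linux/i2c.h>
    #include <linux/of.h>

    static int example_use_adapter(struct device_node *np)
    {
            struct i2c_adapter *adap = of_get_i2c_adapter_by_node(np);

            if (!adap)
                    return -EPROBE_DEFER;

            /* ... perform transfers on the adapter ... */

            i2c_put_adapter(adap);  /* pairs with the _get_ above */
            return 0;
    }
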
diff --git a/kernel/include/linux/i2c/i2c-rcar.h b/kernel/include/linux/i2c/i2c-rcar.h
deleted file mode 100644
index 496f5c2b2..000000000
--- a/kernel/include/linux/i2c/i2c-rcar.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __I2C_R_CAR_H__
-#define __I2C_R_CAR_H__
-
-#include <linux/platform_device.h>
-
-struct i2c_rcar_platform_data {
- u32 bus_speed;
-};
-
-#endif /* __I2C_R_CAR_H__ */
diff --git a/kernel/include/linux/i2c/twl.h b/kernel/include/linux/i2c/twl.h
index 0bc03f100..9ad7828d9 100644
--- a/kernel/include/linux/i2c/twl.h
+++ b/kernel/include/linux/i2c/twl.h
@@ -675,6 +675,7 @@ struct twl4030_power_data {
struct twl4030_resconfig *board_config;
#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
bool use_poweroff; /* Board is wired for TWL poweroff */
+ bool ac_charger_quirk; /* Disable AC charger on board */
};
extern int twl4030_remove_script(u8 flags);
diff --git a/kernel/include/linux/ide.h b/kernel/include/linux/ide.h
index 93b5ca754..a633898f3 100644
--- a/kernel/include/linux/ide.h
+++ b/kernel/include/linux/ide.h
@@ -39,6 +39,19 @@
struct device;
+/* IDE-specific values for req->cmd_type */
+enum ata_cmd_type_bits {
+ REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
+ REQ_TYPE_ATA_PC,
+ REQ_TYPE_ATA_SENSE, /* sense request */
+ REQ_TYPE_ATA_PM_SUSPEND, /* suspend request */
+ REQ_TYPE_ATA_PM_RESUME, /* resume request */
+};
+
+#define ata_pm_request(rq) \
+ ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
+ (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
+
/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
@@ -1314,6 +1327,19 @@ struct ide_port_info {
u8 udma_mask;
};
+/*
+ * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
+ * requests.
+ */
+struct ide_pm_state {
+ /* PM state machine step value, currently driver specific */
+ int pm_step;
+ /* requested PM state value (S1, S2, S3, S4, ...) */
+ u32 pm_state;
+ void *data; /* for driver use */
+};
+
+
int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
const struct ide_port_info *, void *);
@@ -1551,4 +1577,5 @@ static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
#define ide_host_for_each_port(i, port, host) \
for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
+
#endif /* _IDE_H */
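
ata_pm_request() folds the two new PM request types into one predicate. A sketch of the dispatch an IDE request handler might do; note that attaching the struct ide_pm_state via rq->special is an assumption of this sketch, mirroring how the in-tree PM code appears to pass it:

    #include <linux/blkdev.h>
    #include <linux/ide.h>

    /* illustrative dispatcher, not a real driver callback */
    static void example_dispatch(ide_drive_t *drive, struct request *rq)
    {
            if (ata_pm_request(rq)) {
                    struct ide_pm_state *pm = rq->special;  /* assumed */

                    pm->pm_step++;  /* advance the driver-specific steps */
                    return;
            }
            /* ... normal taskfile / packet command handling ... */
    }
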
diff --git a/kernel/include/linux/ieee80211.h b/kernel/include/linux/ieee80211.h
index b9c7897dc..452c0b0d2 100644
--- a/kernel/include/linux/ieee80211.h
+++ b/kernel/include/linux/ieee80211.h
@@ -121,7 +121,7 @@
#define IEEE80211_MAX_SN IEEE80211_SN_MASK
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
-static inline int ieee80211_sn_less(u16 sn1, u16 sn2)
+static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
{
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
}
@@ -250,7 +250,7 @@ struct ieee80211_qos_hdr {
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_tods(__le16 fc)
+static inline bool ieee80211_has_tods(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0;
}
@@ -259,7 +259,7 @@ static inline int ieee80211_has_tods(__le16 fc)
* ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_fromds(__le16 fc)
+static inline bool ieee80211_has_fromds(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0;
}
@@ -268,7 +268,7 @@ static inline int ieee80211_has_fromds(__le16 fc)
* ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_a4(__le16 fc)
+static inline bool ieee80211_has_a4(__le16 fc)
{
__le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
return (fc & tmp) == tmp;
@@ -278,7 +278,7 @@ static inline int ieee80211_has_a4(__le16 fc)
* ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_morefrags(__le16 fc)
+static inline bool ieee80211_has_morefrags(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0;
}
@@ -287,7 +287,7 @@ static inline int ieee80211_has_morefrags(__le16 fc)
* ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_retry(__le16 fc)
+static inline bool ieee80211_has_retry(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0;
}
@@ -296,7 +296,7 @@ static inline int ieee80211_has_retry(__le16 fc)
* ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_pm(__le16 fc)
+static inline bool ieee80211_has_pm(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0;
}
@@ -305,7 +305,7 @@ static inline int ieee80211_has_pm(__le16 fc)
* ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_moredata(__le16 fc)
+static inline bool ieee80211_has_moredata(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0;
}
@@ -314,7 +314,7 @@ static inline int ieee80211_has_moredata(__le16 fc)
* ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_protected(__le16 fc)
+static inline bool ieee80211_has_protected(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0;
}
@@ -323,7 +323,7 @@ static inline int ieee80211_has_protected(__le16 fc)
* ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_has_order(__le16 fc)
+static inline bool ieee80211_has_order(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0;
}
@@ -332,7 +332,7 @@ static inline int ieee80211_has_order(__le16 fc)
* ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_mgmt(__le16 fc)
+static inline bool ieee80211_is_mgmt(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT);
@@ -342,7 +342,7 @@ static inline int ieee80211_is_mgmt(__le16 fc)
* ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_ctl(__le16 fc)
+static inline bool ieee80211_is_ctl(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL);
@@ -352,7 +352,7 @@ static inline int ieee80211_is_ctl(__le16 fc)
* ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_data(__le16 fc)
+static inline bool ieee80211_is_data(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_DATA);
@@ -362,7 +362,7 @@ static inline int ieee80211_is_data(__le16 fc)
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_data_qos(__le16 fc)
+static inline bool ieee80211_is_data_qos(__le16 fc)
{
/*
* mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need
@@ -376,7 +376,7 @@ static inline int ieee80211_is_data_qos(__le16 fc)
* ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_data_present(__le16 fc)
+static inline bool ieee80211_is_data_present(__le16 fc)
{
/*
* mask with 0x40 and test that that bit is clear to only return true
@@ -390,7 +390,7 @@ static inline int ieee80211_is_data_present(__le16 fc)
* ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_assoc_req(__le16 fc)
+static inline bool ieee80211_is_assoc_req(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ);
@@ -400,7 +400,7 @@ static inline int ieee80211_is_assoc_req(__le16 fc)
* ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_assoc_resp(__le16 fc)
+static inline bool ieee80211_is_assoc_resp(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP);
@@ -410,7 +410,7 @@ static inline int ieee80211_is_assoc_resp(__le16 fc)
* ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_reassoc_req(__le16 fc)
+static inline bool ieee80211_is_reassoc_req(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ);
@@ -420,7 +420,7 @@ static inline int ieee80211_is_reassoc_req(__le16 fc)
* ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_reassoc_resp(__le16 fc)
+static inline bool ieee80211_is_reassoc_resp(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP);
@@ -430,7 +430,7 @@ static inline int ieee80211_is_reassoc_resp(__le16 fc)
* ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_probe_req(__le16 fc)
+static inline bool ieee80211_is_probe_req(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ);
@@ -440,7 +440,7 @@ static inline int ieee80211_is_probe_req(__le16 fc)
* ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_probe_resp(__le16 fc)
+static inline bool ieee80211_is_probe_resp(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
@@ -450,7 +450,7 @@ static inline int ieee80211_is_probe_resp(__le16 fc)
* ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_beacon(__le16 fc)
+static inline bool ieee80211_is_beacon(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
@@ -460,7 +460,7 @@ static inline int ieee80211_is_beacon(__le16 fc)
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_atim(__le16 fc)
+static inline bool ieee80211_is_atim(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM);
@@ -470,7 +470,7 @@ static inline int ieee80211_is_atim(__le16 fc)
* ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_disassoc(__le16 fc)
+static inline bool ieee80211_is_disassoc(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC);
@@ -480,7 +480,7 @@ static inline int ieee80211_is_disassoc(__le16 fc)
* ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_auth(__le16 fc)
+static inline bool ieee80211_is_auth(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
@@ -490,7 +490,7 @@ static inline int ieee80211_is_auth(__le16 fc)
* ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_deauth(__le16 fc)
+static inline bool ieee80211_is_deauth(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -500,7 +500,7 @@ static inline int ieee80211_is_deauth(__le16 fc)
* ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_action(__le16 fc)
+static inline bool ieee80211_is_action(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
@@ -510,7 +510,7 @@ static inline int ieee80211_is_action(__le16 fc)
* ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_back_req(__le16 fc)
+static inline bool ieee80211_is_back_req(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
@@ -520,7 +520,7 @@ static inline int ieee80211_is_back_req(__le16 fc)
* ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_back(__le16 fc)
+static inline bool ieee80211_is_back(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK);
@@ -530,7 +530,7 @@ static inline int ieee80211_is_back(__le16 fc)
* ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_pspoll(__le16 fc)
+static inline bool ieee80211_is_pspoll(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
@@ -540,7 +540,7 @@ static inline int ieee80211_is_pspoll(__le16 fc)
* ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_rts(__le16 fc)
+static inline bool ieee80211_is_rts(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
@@ -550,7 +550,7 @@ static inline int ieee80211_is_rts(__le16 fc)
* ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_cts(__le16 fc)
+static inline bool ieee80211_is_cts(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
@@ -560,7 +560,7 @@ static inline int ieee80211_is_cts(__le16 fc)
* ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_ack(__le16 fc)
+static inline bool ieee80211_is_ack(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);
@@ -570,7 +570,7 @@ static inline int ieee80211_is_ack(__le16 fc)
* ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_cfend(__le16 fc)
+static inline bool ieee80211_is_cfend(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND);
@@ -580,7 +580,7 @@ static inline int ieee80211_is_cfend(__le16 fc)
* ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_cfendack(__le16 fc)
+static inline bool ieee80211_is_cfendack(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK);
@@ -590,7 +590,7 @@ static inline int ieee80211_is_cfendack(__le16 fc)
* ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_nullfunc(__le16 fc)
+static inline bool ieee80211_is_nullfunc(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
@@ -600,7 +600,7 @@ static inline int ieee80211_is_nullfunc(__le16 fc)
* ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_qos_nullfunc(__le16 fc)
+static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
@@ -624,7 +624,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
*/
-static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
{
return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
}
@@ -1379,6 +1379,7 @@ struct ieee80211_ht_operation {
/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
@@ -1745,8 +1746,7 @@ enum ieee80211_eid {
WLAN_EID_TIM = 5,
WLAN_EID_IBSS_PARAMS = 6,
WLAN_EID_COUNTRY = 7,
- WLAN_EID_HP_PARAMS = 8,
- WLAN_EID_HP_TABLE = 9,
+ /* 8, 9 reserved */
WLAN_EID_REQUEST = 10,
WLAN_EID_QBSS_LOAD = 11,
WLAN_EID_EDCA_PARAM_SET = 12,
@@ -1932,6 +1932,8 @@ enum ieee80211_category {
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
+ WLAN_CATEGORY_WNM = 10,
+ WLAN_CATEGORY_WNM_UNPROTECTED = 11,
WLAN_CATEGORY_TDLS = 12,
WLAN_CATEGORY_MESH_ACTION = 13,
WLAN_CATEGORY_MULTIHOP_ACTION = 14,
@@ -2074,8 +2076,8 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
@@ -2396,7 +2398,10 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
category = ((u8 *) hdr) + 24;
return *category != WLAN_CATEGORY_PUBLIC &&
*category != WLAN_CATEGORY_HT &&
+ *category != WLAN_CATEGORY_WNM_UNPROTECTED &&
*category != WLAN_CATEGORY_SELF_PROTECTED &&
+ *category != WLAN_CATEGORY_UNPROT_DMG &&
+ *category != WLAN_CATEGORY_VHT &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
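
IEEE80211_ADDBA_PARAM_AMSDU_MASK joins the existing block-ack parameter masks, so all four fields of the 16-bit capability word can now be unpacked; the shift counts below follow directly from the mask values (TID occupies bits 2-5, buffer size bits 6-15). A sketch, assuming the word has already been converted to host order with le16_to_cpu():

    #include <linux/ieee80211.h>

    static void example_parse_addba(u16 params)
    {
            bool amsdu   = params & IEEE80211_ADDBA_PARAM_AMSDU_MASK;  /* bit 0 */
            bool policy  = params & IEEE80211_ADDBA_PARAM_POLICY_MASK; /* bit 1 */
            u8 tid       = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
            u16 buf_size = (params & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

            (void)amsdu; (void)policy; (void)tid; (void)buf_size;
    }
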
diff --git a/kernel/include/linux/ieee802154.h b/kernel/include/linux/ieee802154.h
index 8872ca103..d3e415674 100644
--- a/kernel/include/linux/ieee802154.h
+++ b/kernel/include/linux/ieee802154.h
@@ -25,12 +25,22 @@
#include <linux/types.h>
#include <linux/random.h>
-#include <asm/byteorder.h>
#define IEEE802154_MTU 127
#define IEEE802154_ACK_PSDU_LEN 5
#define IEEE802154_MIN_PSDU_LEN 9
#define IEEE802154_FCS_LEN 2
+#define IEEE802154_MAX_AUTH_TAG_LEN 16
+
+/* General MAC frame format:
+ * 2 bytes: Frame Control
+ * 1 byte: Sequence Number
+ * 20 bytes: Addressing fields
+ * 14 bytes: Auxiliary Security Header
+ */
+#define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14)
+#define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \
+ IEEE802154_FCS_LEN)
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
@@ -205,6 +215,41 @@ enum {
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
};
+/* frame control handling */
+#define IEEE802154_FCTL_FTYPE 0x0003
+#define IEEE802154_FCTL_ACKREQ 0x0020
+#define IEEE802154_FCTL_INTRA_PAN 0x0040
+
+#define IEEE802154_FTYPE_DATA 0x0001
+
+/**
+ * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_data(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE802154_FTYPE_DATA);
+}
+
+/**
+ * ieee802154_is_ackreq - check if acknowledgment request bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_ackreq(__le16 fc)
+{
+ return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ);
+}
+
+/**
+ * ieee802154_is_intra_pan - check if the frame is intra-PAN communication
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_intra_pan(__le16 fc)
+{
+ return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN);
+}
+
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
* available lengths:
@@ -225,15 +270,13 @@ static inline bool ieee802154_is_valid_psdu_len(const u8 len)
- * ieee802154_is_valid_psdu_len - check if extended addr is valid
+ * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
* @addr: extended addr to check
*/
-static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
{
- /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
- * is used internally as extended to short address broadcast mapping.
- * This is currently a workaround because neighbor discovery can't
- * deal with short addresses types right now.
+ /* Bail out if the address is all zero, or if the group
+ * address bit is set.
*/
return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
- (addr != cpu_to_le64(0xffffffffffffffffULL)));
+ !(addr & cpu_to_le64(0x0100000000000000ULL)));
}
/**
@@ -244,9 +287,9 @@ static inline void ieee802154_random_extended_addr(__le64 *addr)
{
get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
- /* toggle some bit if we hit an invalid extended addr */
- if (!ieee802154_is_valid_extended_addr(*addr))
- ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+ /* clear the group bit, and set the locally administered bit */
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
}
#endif /* LINUX_IEEE802154_H */
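
The renamed ieee802154_is_valid_extended_unicast_addr() now encodes exactly two rules (not all-zero, group bit clear), which pairs naturally with the reworked random generator. A sketch of a driver choosing a fallback address (the wrapper name is illustrative):

    #include <linux/ieee802154.h>

    static __le64 example_pick_extended_addr(__le64 hw_addr)
    {
            __le64 addr = hw_addr;

            /* fall back to a random locally administered address if the
             * hardware-provided one is all-zero or a group address */
            if (!ieee802154_is_valid_extended_unicast_addr(addr))
                    ieee802154_random_extended_addr(&addr);

            return addr;
    }
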
diff --git a/kernel/include/linux/if_bridge.h b/kernel/include/linux/if_bridge.h
index dad8b00be..a338a688e 100644
--- a/kernel/include/linux/if_bridge.h
+++ b/kernel/include/linux/if_bridge.h
@@ -46,6 +46,12 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
+/* values as per ieee8021QBridgeFdbAgingTime */
+#define BR_MIN_AGEING_TIME (10 * HZ)
+#define BR_MAX_AGEING_TIME (1000000 * HZ)
+
+#define BR_DEFAULT_AGEING_TIME (300 * HZ)
+
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
typedef int br_should_route_hook_t(struct sk_buff *skb);
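
The new ageing-time bounds mirror ieee8021QBridgeFdbAgingTime's 10..1,000,000 second range, scaled to jiffies. A sketch of the range check a configuration path would plausibly apply to a user-supplied value (the function name is illustrative):

    #include <linux/errno.h>
    #include <linux/if_bridge.h>

    static int example_set_ageing_time(unsigned long ageing_jiffies)
    {
            if (ageing_jiffies < BR_MIN_AGEING_TIME ||
                ageing_jiffies > BR_MAX_AGEING_TIME)
                    return -ERANGE;
            /* ... commit the new value ... */
            return 0;
    }
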
diff --git a/kernel/include/linux/if_link.h b/kernel/include/linux/if_link.h
index da4929927..f923d15b4 100644
--- a/kernel/include/linux/if_link.h
+++ b/kernel/include/linux/if_link.h
@@ -5,6 +5,15 @@
/* We don't want this structure exposed to user space */
+struct ifla_vf_stats {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 broadcast;
+ __u64 multicast;
+};
+
struct ifla_vf_info {
__u32 vf;
__u8 mac[32];
@@ -15,5 +24,6 @@ struct ifla_vf_info {
__u32 min_tx_rate;
__u32 max_tx_rate;
__u32 rss_query_en;
+ __u32 trusted;
};
#endif /* _LINUX_IF_LINK_H */
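
struct ifla_vf_stats stays kernel-internal (see the 'not exposed to user space' note above it); drivers fill it from their per-VF counters when the stats hook is called. A sketch of that shape, assuming the ndo_get_vf_stats signature this struct was added for; the hardware counter reads are left as hypothetical comments:

    #include <linux/if_link.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static int example_ndo_get_vf_stats(struct net_device *dev, int vf,
                                        struct ifla_vf_stats *vf_stats)
    {
            memset(vf_stats, 0, sizeof(*vf_stats));
            /* vf_stats->rx_packets = hw_read_vf_rx_packets(dev, vf); */
            /* vf_stats->tx_packets = hw_read_vf_tx_packets(dev, vf); */
            return 0;
    }
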
diff --git a/kernel/include/linux/if_macvlan.h b/kernel/include/linux/if_macvlan.h
index 6f6929ea8..a4ccc3122 100644
--- a/kernel/include/linux/if_macvlan.h
+++ b/kernel/include/linux/if_macvlan.h
@@ -29,7 +29,7 @@ struct macvtap_queue;
* Maximum times a macvtap device can be opened. This can be used to
* configure the number of receive queues, e.g. for multiqueue virtio.
*/
-#define MAX_MACVTAP_QUEUES 16
+#define MAX_MACVTAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
diff --git a/kernel/include/linux/if_pppox.h b/kernel/include/linux/if_pppox.h
index 66a7d7600..b49cf923b 100644
--- a/kernel/include/linux/if_pppox.h
+++ b/kernel/include/linux/if_pppox.h
@@ -74,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
struct module;
struct pppox_proto {
- int (*create)(struct net *net, struct socket *sock);
+ int (*create)(struct net *net, struct socket *sock, int kern);
int (*ioctl)(struct socket *sock, unsigned int cmd,
unsigned long arg);
struct module *owner;
diff --git a/kernel/include/linux/if_vlan.h b/kernel/include/linux/if_vlan.h
index 920e4457c..67ce5bd3b 100644
--- a/kernel/include/linux/if_vlan.h
+++ b/kernel/include/linux/if_vlan.h
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not of VLAN type
*/
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if @skb->vlan_tci is not set correctly
*/
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not VLAN tagged
*/
@@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
*/
proto = vhdr->h_vlan_encapsulated_proto;
- if (ntohs(proto) >= ETH_P_802_3_MIN) {
+ if (eth_proto_is_802_3(proto)) {
skb->protocol = proto;
return;
}
@@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
return features;
}
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers, returns 0 if equal.
+ *
+ * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+ const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+ return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+ ((__force u32)h1->h_vlan_encapsulated_proto ^
+ (__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
#endif /* !(_LINUX_IF_VLAN_H_) */
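
compare_vlan_header() follows the usual XOR-compare convention: zero means equal, any set bit means a mismatch, so callers treat the result as a flag rather than an ordering. A one-line usage sketch:

    #include <linux/if_vlan.h>

    static bool example_vlan_headers_equal(const struct vlan_hdr *a,
                                           const struct vlan_hdr *b)
    {
            return compare_vlan_header(a, b) == 0;
    }
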
diff --git a/kernel/include/linux/igmp.h b/kernel/include/linux/igmp.h
index 2c677afee..9c9de1154 100644
--- a/kernel/include/linux/igmp.h
+++ b/kernel/include/linux/igmp.h
@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
+extern int sysctl_igmp_llm_reports;
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
extern int sysctl_igmp_qrv;
@@ -109,7 +110,7 @@ struct ip_mc_list {
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
-extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
+extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
@@ -130,5 +131,6 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
#endif
diff --git a/kernel/include/linux/iio/buffer.h b/kernel/include/linux/iio/buffer.h
index eb8622b78..1600c5582 100644
--- a/kernel/include/linux/iio/buffer.h
+++ b/kernel/include/linux/iio/buffer.h
@@ -29,6 +29,7 @@ struct iio_buffer;
* @set_length: set number of datums in buffer
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @modes: Supported operating modes by this buffer type
*
* The purpose of this structure is to make the buffer element
* modular as event for a given driver, different usecases may require
@@ -51,6 +52,8 @@ struct iio_buffer_access_funcs {
int (*set_length)(struct iio_buffer *buffer, int length);
void (*release)(struct iio_buffer *buffer);
+
+ unsigned int modes;
};
/**
diff --git a/kernel/include/linux/iio/common/st_sensors.h b/kernel/include/linux/iio/common/st_sensors.h
index 2c476acb8..2fe939c73 100644
--- a/kernel/include/linux/iio/common/st_sensors.h
+++ b/kernel/include/linux/iio/common/st_sensors.h
@@ -166,6 +166,7 @@ struct st_sensor_transfer_function {
/**
* struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
+ * @wai_addr: The address of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
* @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@ struct st_sensor_transfer_function {
*/
struct st_sensor_settings {
u8 wai;
+ u8 wai_addr;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
int num_ch;
@@ -269,6 +271,10 @@ void st_sensors_power_enable(struct iio_dev *indio_dev);
void st_sensors_power_disable(struct iio_dev *indio_dev);
+int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval);
+
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr);
int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable);
diff --git a/kernel/include/linux/iio/consumer.h b/kernel/include/linux/iio/consumer.h
index 26fb8f634..fad58671c 100644
--- a/kernel/include/linux/iio/consumer.h
+++ b/kernel/include/linux/iio/consumer.h
@@ -100,7 +100,7 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
/**
* iio_channel_cb_get_channels() - get access to the underlying channels.
- * @cb_buff: The callback buffer from whom we want the channel
+ * @cb_buffer: The callback buffer from whom we want the channel
* information.
*
* This function allows one to obtain information about the channels.
diff --git a/kernel/include/linux/iio/iio.h b/kernel/include/linux/iio/iio.h
index 5ed7771ad..19c94c9ac 100644
--- a/kernel/include/linux/iio/iio.h
+++ b/kernel/include/linux/iio/iio.h
@@ -32,6 +32,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
IIO_CHAN_INFO_AVERAGE_RAW,
IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
IIO_CHAN_INFO_SAMP_FREQ,
IIO_CHAN_INFO_FREQUENCY,
IIO_CHAN_INFO_PHASE,
@@ -43,6 +44,8 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_CALIBWEIGHT,
IIO_CHAN_INFO_DEBOUNCE_COUNT,
IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
};
enum iio_shared_by {
@@ -291,6 +294,7 @@ static inline s64 iio_get_time_ns(void)
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
#define INDIO_BUFFER_HARDWARE 0x08
+#define INDIO_EVENT_TRIGGERED 0x10
#define INDIO_ALL_BUFFER_MODES \
(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
@@ -454,6 +458,7 @@ struct iio_buffer_setup_ops {
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
* @pollfunc: [DRIVER] function run on trigger being received
+ * @pollfunc_event: [DRIVER] function run on an event trigger being received
* @channels: [DRIVER] channel specification structure table
* @num_channels: [DRIVER] number of channels specified in @channels.
* @channel_attr_list: [INTERN] keep track of automatically created channel
@@ -492,6 +497,7 @@ struct iio_dev {
unsigned scan_index_timestamp;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
+ struct iio_poll_func *pollfunc_event;
struct iio_chan_spec const *channels;
int num_channels;
diff --git a/kernel/include/linux/iio/sysfs.h b/kernel/include/linux/iio/sysfs.h
index 8a1d18640..9cd8f7472 100644
--- a/kernel/include/linux/iio/sysfs.h
+++ b/kernel/include/linux/iio/sysfs.h
@@ -18,7 +18,8 @@ struct iio_chan_spec;
* struct iio_dev_attr - iio specific device attribute
* @dev_attr: underlying device attribute
* @address: associated register address
- * @l: list head for maintaining list of dynamically created attrs.
+ * @l: list head for maintaining list of dynamically created attrs
+ * @c: specification for the underlying channel
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
diff --git a/kernel/include/linux/iio/trigger.h b/kernel/include/linux/iio/trigger.h
index fa76c79a5..1c9e028e0 100644
--- a/kernel/include/linux/iio/trigger.h
+++ b/kernel/include/linux/iio/trigger.h
@@ -18,6 +18,9 @@ struct iio_subirq {
bool enabled;
};
+struct iio_dev;
+struct iio_trigger;
+
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @owner: used to monitor usage count of the trigger.
diff --git a/kernel/include/linux/iio/triggered_buffer.h b/kernel/include/linux/iio/triggered_buffer.h
index c378ebec6..f72f70d5a 100644
--- a/kernel/include/linux/iio/triggered_buffer.h
+++ b/kernel/include/linux/iio/triggered_buffer.h
@@ -7,8 +7,8 @@ struct iio_dev;
struct iio_buffer_setup_ops;
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
- irqreturn_t (*pollfunc_bh)(int irq, void *p),
- irqreturn_t (*pollfunc_th)(int irq, void *p),
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
diff --git a/kernel/include/linux/iio/triggered_event.h b/kernel/include/linux/iio/triggered_event.h
new file mode 100644
index 000000000..8fe853708
--- /dev/null
+++ b/kernel/include/linux/iio/triggered_event.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_IIO_TRIGGERED_EVENT_H_
+#define _LINUX_IIO_TRIGGERED_EVENT_H_
+
+#include <linux/interrupt.h>
+
+struct iio_dev;
+
+int iio_triggered_event_setup(struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p));
+void iio_triggered_event_cleanup(struct iio_dev *indio_dev);
+
+#endif
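
The new pair mirrors iio_triggered_buffer_setup()/cleanup(). A hedged probe/remove sketch, with placeholder handler names and a NULL top half (which the analogous buffer helper accepts; assumed acceptable here too):

    #include <linux/iio/iio.h>
    #include <linux/iio/triggered_event.h>

    static irqreturn_t example_event_handler(int irq, void *p)
    {
            /* runs in threaded context when the event trigger fires */
            return IRQ_HANDLED;
    }

    static int example_setup(struct iio_dev *indio_dev)
    {
            return iio_triggered_event_setup(indio_dev, NULL,
                                             example_event_handler);
    }

    static void example_teardown(struct iio_dev *indio_dev)
    {
            iio_triggered_event_cleanup(indio_dev);
    }
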
diff --git a/kernel/include/linux/iio/types.h b/kernel/include/linux/iio/types.h
index 942b6de68..32b579525 100644
--- a/kernel/include/linux/iio/types.h
+++ b/kernel/include/linux/iio/types.h
@@ -17,6 +17,8 @@ enum iio_event_info {
IIO_EV_INFO_VALUE,
IIO_EV_INFO_HYSTERESIS,
IIO_EV_INFO_PERIOD,
+ IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
+ IIO_EV_INFO_LOW_PASS_FILTER_3DB,
};
#define IIO_VAL_INT 1
diff --git a/kernel/include/linux/inet_diag.h b/kernel/include/linux/inet_diag.h
index ac48b10c9..0e707f0c1 100644
--- a/kernel/include/linux/inet_diag.h
+++ b/kernel/include/linux/inet_diag.h
@@ -24,6 +24,7 @@ struct inet_diag_handler {
struct inet_diag_msg *r,
void *info);
__u16 idiag_type;
+ __u16 idiag_info_size;
};
struct inet_connection_sock;
diff --git a/kernel/include/linux/inetdevice.h b/kernel/include/linux/inetdevice.h
index 0a21fbefd..ee971f335 100644
--- a/kernel/include/linux/inetdevice.h
+++ b/kernel/include/linux/inetdevice.h
@@ -120,6 +120,9 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
|| (!IN_DEV_FORWARD(in_dev) && \
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
+#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
+ IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
@@ -168,7 +171,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst,
__be32 local, int scope);
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
__be32 mask);
-static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
+static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
{
return !((addr^ifa->ifa_address)&ifa->ifa_mask);
}
@@ -177,15 +180,15 @@ static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
* Check if a mask is acceptable.
*/
-static __inline__ int bad_mask(__be32 mask, __be32 addr)
+static __inline__ bool bad_mask(__be32 mask, __be32 addr)
{
__u32 hmask;
if (addr & (mask = ~mask))
- return 1;
+ return true;
hmask = ntohl(mask);
if (hmask & (hmask+1))
- return 1;
- return 0;
+ return true;
+ return false;
}
#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \
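
bad_mask() rejects a mask in two steps: the address must not set bits outside the mask, and the inverted mask must be of the form 0...01...1, i.e. the mask's ones are contiguous from the top. A worked sketch (the constants in the comments are just arithmetic, not values from the source):

    #include <linux/inetdevice.h>

    static void example_mask_checks(void)
    {
            /* 255.255.255.0: ~mask = 0x000000ff (host order), and
             * 0x000000ff & 0x00000100 == 0, so the mask is contiguous */
            bool ok = !bad_mask(htonl(0xffffff00), 0);

            /* 255.0.255.0: ~mask = 0x00ff00ff, and
             * 0x00ff00ff & 0x00ff0100 == 0x00ff0000, non-contiguous */
            bool bad = bad_mask(htonl(0xff00ff00), 0);

            (void)ok; (void)bad;
    }
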
diff --git a/kernel/include/linux/init.h b/kernel/include/linux/init.h
index 21b6d768e..b449f378f 100644
--- a/kernel/include/linux/init.h
+++ b/kernel/include/linux/init.h
@@ -91,14 +91,6 @@
#define __exit __section(.exit.text) __exitused __cold notrace
-/* temporary, until all users are removed */
-#define __cpuinit
-#define __cpuinitdata
-#define __cpuinitconst
-#define __cpuexit
-#define __cpuexitdata
-#define __cpuexitconst
-
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(.meminit.text) __cold notrace
#define __meminitdata __section(.meminit.data)
@@ -116,9 +108,6 @@
#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
-/* temporary, until all users are removed */
-#define __CPUINIT
-
#define __MEMINIT .section ".meminit.text", "ax"
#define __MEMINITDATA .section ".meminit.data", "aw"
#define __MEMINITRODATA .section ".meminit.rodata", "a"
@@ -293,68 +282,8 @@ void __init parse_early_param(void);
void __init parse_early_options(char *cmdline);
#endif /* __ASSEMBLY__ */
-/**
- * module_init() - driver initialization entry point
- * @x: function to be run at kernel boot time or module insertion
- *
- * module_init() will either be called during do_initcalls() (if
- * builtin) or at module insertion time (if a module). There can only
- * be one per module.
- */
-#define module_init(x) __initcall(x);
-
-/**
- * module_exit() - driver exit entry point
- * @x: function to be run when driver is removed
- *
- * module_exit() will wrap the driver clean-up code
- * with cleanup_module() when used with rmmod when
- * the driver is a module. If the driver is statically
- * compiled into the kernel, module_exit() has no effect.
- * There can only be one per module.
- */
-#define module_exit(x) __exitcall(x);
-
#else /* MODULE */
-/*
- * In most cases loadable modules do not need custom
- * initcall levels. There are still some valid cases where
- * a driver may be needed early if built in, and does not
- * matter when built as a loadable module. Like bus
- * snooping debug drivers.
- */
-#define early_initcall(fn) module_init(fn)
-#define core_initcall(fn) module_init(fn)
-#define core_initcall_sync(fn) module_init(fn)
-#define postcore_initcall(fn) module_init(fn)
-#define postcore_initcall_sync(fn) module_init(fn)
-#define arch_initcall(fn) module_init(fn)
-#define subsys_initcall(fn) module_init(fn)
-#define subsys_initcall_sync(fn) module_init(fn)
-#define fs_initcall(fn) module_init(fn)
-#define fs_initcall_sync(fn) module_init(fn)
-#define rootfs_initcall(fn) module_init(fn)
-#define device_initcall(fn) module_init(fn)
-#define device_initcall_sync(fn) module_init(fn)
-#define late_initcall(fn) module_init(fn)
-#define late_initcall_sync(fn) module_init(fn)
-
-#define console_initcall(fn) module_init(fn)
-#define security_initcall(fn) module_init(fn)
-
-/* Each module must use one module_init(). */
-#define module_init(initfn) \
- static inline initcall_t __inittest(void) \
- { return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
-
-/* This is only required if you want to be unloadable. */
-#define module_exit(exitfn) \
- static inline exitcall_t __exittest(void) \
- { return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
-
#define __setup_param(str, unique_id, fn) /* nothing */
#define __setup(str, func) /* nothing */
#endif
@@ -362,24 +291,6 @@ void __init parse_early_options(char *cmdline);
/* Data marked not to be saved by software suspend */
#define __nosavedata __section(.data..nosave)
-/* This means "can be init if no module support, otherwise module load
- may call it." */
-#ifdef CONFIG_MODULES
-#define __init_or_module
-#define __initdata_or_module
-#define __initconst_or_module
-#define __INIT_OR_MODULE .text
-#define __INITDATA_OR_MODULE .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
-#else
-#define __init_or_module __init
-#define __initdata_or_module __initdata
-#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
-#endif /*CONFIG_MODULES*/
-
#ifdef MODULE
#define __exit_p(x) x
#else
diff --git a/kernel/include/linux/init_task.h b/kernel/include/linux/init_task.h
index 4a77d39ff..d8ec0f202 100644
--- a/kernel/include/linux/init_task.h
+++ b/kernel/include/linux/init_task.h
@@ -25,13 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-#ifdef CONFIG_CGROUPS
-#define INIT_GROUP_RWSEM(sig) \
- .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
-#else
-#define INIT_GROUP_RWSEM(sig)
-#endif
-
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -39,6 +32,14 @@ extern struct fs_struct init_fs;
#define INIT_CPUSET_SEQ(tsk)
#endif
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
+},
+#else
+#define INIT_PREV_CPUTIME(x)
+#endif
+
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -50,13 +51,13 @@ extern struct fs_struct init_fs;
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
- .cputime = INIT_CPUTIME, \
- .running = 0, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ .cputime_atomic = INIT_CPUTIME_ATOMIC, \
+ .running = false, \
+ .checking_timer = false, \
}, \
+ INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
- INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;
@@ -263,6 +264,7 @@ extern struct task_group root_task_group;
INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
+ INIT_PREV_CPUTIME(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
diff --git a/kernel/include/linux/input.h b/kernel/include/linux/input.h
index 82ce323b9..1e967694e 100644
--- a/kernel/include/linux/input.h
+++ b/kernel/include/linux/input.h
@@ -469,6 +469,8 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke);
+void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
+
extern struct class input_class;
/**
diff --git a/kernel/include/linux/input/edt-ft5x06.h b/kernel/include/linux/input/edt-ft5x06.h
deleted file mode 100644
index 8a1e0d1a0..000000000
--- a/kernel/include/linux/input/edt-ft5x06.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _EDT_FT5X06_H
-#define _EDT_FT5X06_H
-
-/*
- * Copyright (c) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-struct edt_ft5x06_platform_data {
- int irq_pin;
- int reset_pin;
-
- /* startup defaults for operational parameters */
- bool use_parameters;
- u8 gain;
- u8 threshold;
- u8 offset;
- u8 report_rate;
-};
-
-#endif /* _EDT_FT5X06_H */
diff --git a/kernel/include/linux/input/touchscreen.h b/kernel/include/linux/input/touchscreen.h
index 08a5ef6e8..c91e13761 100644
--- a/kernel/include/linux/input/touchscreen.h
+++ b/kernel/include/linux/input/touchscreen.h
@@ -9,14 +9,8 @@
#ifndef _TOUCHSCREEN_H
#define _TOUCHSCREEN_H
-#include <linux/input.h>
+struct input_dev;
-#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev);
-#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev)
-{
-}
-#endif
+void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
#endif
diff --git a/kernel/include/linux/intel-iommu.h b/kernel/include/linux/intel-iommu.h
index a240e61a7..2d9b65004 100644
--- a/kernel/include/linux/intel-iommu.h
+++ b/kernel/include/linux/intel-iommu.h
@@ -1,5 +1,9 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2015, Intel Corporation.
+ *
+ * Authors: Ashok Raj <ashok.raj@intel.com>
+ * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * David Woodhouse <David.Woodhouse@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -13,10 +17,6 @@
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
#ifndef _INTEL_IOMMU_H_
@@ -25,7 +25,10 @@
#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
+#include <linux/idr.h>
#include <linux/dma_remapping.h>
+#include <linux/mmu_notifier.h>
+#include <linux/list.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -57,16 +60,21 @@
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
+#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
+#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
+#define DMAR_PRS_REG 0xdc /* Page request status register */
+#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
+#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
+#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
+#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
#define OFFSET_STRIDE (9)
-/*
-#define dmar_readl(dmar, reg) readl(dmar + reg)
-#define dmar_readq(dmar, reg) ({ \
- u32 lo, hi; \
- lo = readl(dmar + reg); \
- hi = readl(dmar + reg + 4); \
- (((u64) hi) << 32) + lo; })
-*/
+
+#ifdef CONFIG_64BIT
+#define dmar_readq(a) readq(a)
+#define dmar_writeq(a,v) writeq(v,a)
+#else
static inline u64 dmar_readq(void __iomem *addr)
{
u32 lo, hi;
@@ -80,6 +88,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
writel((u32)val, addr);
writel((u32)(val >> 32), addr + 4);
}
+#endif
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
@@ -87,6 +96,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/*
* Decoding Capability Register
*/
+#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
@@ -122,7 +132,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
-/* PASID support used to be on bit 28 */
+#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
@@ -225,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
@@ -252,6 +265,11 @@ enum {
#define QI_DIOTLB_TYPE 0x3
#define QI_IEC_TYPE 0x4
#define QI_IWD_TYPE 0x5
+#define QI_EIOTLB_TYPE 0x6
+#define QI_PC_TYPE 0x7
+#define QI_DEIOTLB_TYPE 0x8
+#define QI_PGRP_RESP_TYPE 0x9
+#define QI_PSTRM_RESP_TYPE 0xa
#define QI_IEC_SELECTIVE (((u64)1) << 4)
#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
@@ -279,6 +297,53 @@ enum {
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
+#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
+#define QI_PC_DID(did) (((u64)did) << 16)
+#define QI_PC_GRAN(gran) (((u64)gran) << 4)
+
+#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
+#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
+
+#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
+#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_EIOTLB_AM(am) (((u64)am))
+#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
+#define QI_EIOTLB_DID(did) (((u64)did) << 16)
+#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+
+#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
+#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
+#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
+#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
+#define QI_DEV_EIOTLB_MAX_INVS 32
+
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
+#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
+#define QI_PGRP_RESP_CODE(res) ((u64)(res))
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+#define QI_PGRP_DID(did) (((u64)(did)) << 16)
+#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+
+#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
+#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
+#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
+#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
+#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
+#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
+#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
+
+#define QI_RESP_SUCCESS 0x0
+#define QI_RESP_INVALID 0x1
+#define QI_RESP_FAILURE 0xf
+
+#define QI_GRAN_ALL_ALL 0
+#define QI_GRAN_NONG_ALL 1
+#define QI_GRAN_NONG_PASID 2
+#define QI_GRAN_PSI_PASID 3
+
struct qi_desc {
u64 low, high;
};
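As an editorial aside: the QI_PC_* field macros added above OR the descriptor type, granularity, domain id and PASID into the low 64 bits of a qi_desc. A minimal sketch of how they might compose; the helper name qi_pc_desc and the submission call are illustrative, not part of the patch (qi_submit_sync() is declared later in this header):

    /* Hypothetical helper: build a PASID-selective PASID-cache
     * invalidation descriptor from the field macros above. */
    static inline struct qi_desc qi_pc_desc(u16 did, u32 pasid)
    {
    	struct qi_desc desc;

    	desc.low = QI_PC_PASID_SEL | QI_PC_DID(did) | QI_PC_PASID(pasid);
    	desc.high = 0;
    	return desc;
    }

    /* Submission (illustrative): qi_submit_sync(&desc, iommu); */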
@@ -296,9 +361,12 @@ struct q_inval {
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
#define INTR_REMAP_TABLE_ENTRIES 65536
+struct irq_domain;
+
struct ir_table {
struct irte *base;
unsigned long *bitmap;
@@ -320,6 +388,13 @@ enum {
MAX_SR_DMAR_REGS
};
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+
+struct pasid_entry;
+struct pasid_state_entry;
+struct page_req_dsc;
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -331,26 +406,41 @@ struct intel_iommu {
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
- unsigned int irq;
+ unsigned int irq, pr_irq;
u16 segment; /* PCI segment# */
unsigned char name[13]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain **domains; /* ptr to domains */
+ struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
struct iommu_flush flush;
#endif
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ /* These are large and need to be contiguous, so we allocate just
+ * one for now. We'll maybe want to rethink that if we truly give
+ * devices away to userspace processes (e.g. for DPDK) and don't
+ * want to trust that userspace will use *only* the PASID it was
+ * told to. But while it's all driver-arbitrated, we're fine. */
+ struct pasid_entry *pasid_table;
+ struct pasid_state_entry *pasid_state_table;
+ struct page_req_dsc *prq;
+ unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct idr pasid_idr;
+#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
int node;
+ u32 flags; /* Software defined flags */
};
static inline void __iommu_flush_cache(
@@ -379,6 +469,38 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
+extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
+extern int intel_svm_enable_prq(struct intel_iommu *iommu);
+extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+
+struct svm_dev_ops;
+
+struct intel_svm_dev {
+ struct list_head list;
+ struct rcu_head rcu;
+ struct device *dev;
+ struct svm_dev_ops *ops;
+ int users;
+ u16 did;
+ u16 dev_iotlb:1;
+ u16 sid, qdep;
+};
+
+struct intel_svm {
+ struct mmu_notifier notifier;
+ struct mm_struct *mm;
+ struct intel_iommu *iommu;
+ int flags;
+ int pasid;
+ struct list_head devs;
+};
+
+extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
+extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#endif
+
extern const struct attribute_group *intel_iommu_groups[];
#endif
diff --git a/kernel/include/linux/intel-svm.h b/kernel/include/linux/intel-svm.h
new file mode 100644
index 000000000..3c2579404
--- /dev/null
+++ b/kernel/include/linux/intel-svm.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright © 2015 Intel Corporation.
+ *
+ * Authors: David Woodhouse <David.Woodhouse@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INTEL_SVM_H__
+#define __INTEL_SVM_H__
+
+struct device;
+
+struct svm_dev_ops {
+ void (*fault_cb)(struct device *dev, int pasid, u64 address,
+ u32 private, int rwxp, int response);
+};
+
+/* Values for rwxp in fault_cb callback */
+#define SVM_REQ_READ (1<<3)
+#define SVM_REQ_WRITE (1<<2)
+#define SVM_REQ_EXEC (1<<1)
+#define SVM_REQ_PRIV (1<<0)
+
+
+/*
+ * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
+ * PASID for the current process. Even if a PASID already exists, a new one
+ * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID
+ * will not be given to subsequent callers. This facility allows a driver to
+ * disambiguate between multiple device contexts which access the same MM,
+ * if there is no other way to do so. It should be used sparingly, if at all.
+ */
+#define SVM_FLAG_PRIVATE_PASID (1<<0)
+
+/*
+ * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
+ * for access to kernel addresses. No IOTLB flushes are automatically done
+ * for kernel mappings; it is valid only for access to the kernel's static
+ * 1:1 mapping of physical memory — not to vmalloc or even module mappings.
+ * A future API addition may permit the use of such ranges, by means of an
+ * explicit IOTLB flush call (akin to the DMA API's unmap method).
+ *
+ * It is unlikely that we will ever hook into flush_tlb_kernel_range() to
+ * do such IOTLB flushes automatically.
+ */
+#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+
+/**
+ * intel_svm_bind_mm() - Bind the current process to a PASID
+ * @dev: Device to be granted access
+ * @pasid: Address for allocated PASID
+ * @flags: Flags. Later for requesting supervisor mode, etc.
+ * @ops: Callbacks to device driver
+ *
+ * This function attempts to enable PASID support for the given device.
+ * If the @pasid argument is non-%NULL, a PASID is allocated for access
+ * to the MM of the current process.
+ *
+ * By using a %NULL value for the @pasid argument, this function can
+ * be used to simply validate that PASID support is available for the
+ * given device — i.e. that it is behind an IOMMU which has the
+ * requisite support, and is enabled.
+ *
+ * Page faults are handled transparently by the IOMMU code, and there
+ * should be no need for the device driver to be involved. If a page
+ * fault cannot be handled (i.e. is an invalid address rather than
+ * just needs paging in), then the page request will be completed by
+ * the core IOMMU code with appropriate status, and the device itself
+ * can then report the resulting fault to its driver via whatever
+ * mechanism is appropriate.
+ *
+ * Multiple calls from the same process may result in the same PASID
+ * being re-used. A reference count is kept.
+ */
+extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
+ struct svm_dev_ops *ops);
+
+/**
+ * intel_svm_unbind_mm() - Unbind a specified PASID
+ * @dev: Device for which PASID was allocated
+ * @pasid: PASID value to be unbound
+ *
+ * This function allows a PASID to be retired when the device no
+ * longer requires access to the address space of a given process.
+ *
+ * If the use count for the PASID in question reaches zero, the
+ * PASID is revoked and may no longer be used by hardware.
+ *
+ * Device drivers are required to ensure that no access (including
+ * page requests) is currently outstanding for the PASID in question,
+ * before calling this function.
+ */
+extern int intel_svm_unbind_mm(struct device *dev, int pasid);
+
+#else /* CONFIG_INTEL_IOMMU_SVM */
+
+static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
+ int flags, struct svm_dev_ops *ops)
+{
+ return -ENOSYS;
+}
+
+static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
+{
+ BUG();
+}
+#endif /* CONFIG_INTEL_IOMMU_SVM */
+
+#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+
+#endif /* __INTEL_SVM_H__ */
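A hedged usage sketch of the API declared above: a device driver checks availability, binds the current process to a PASID, decodes the rwxp bits in its fault callback, and unbinds at teardown. All driver-side names are hypothetical; only the intel_svm_* and SVM_* identifiers come from the header.

    static void my_fault_cb(struct device *dev, int pasid, u64 address,
    			    u32 private, int rwxp, int response)
    {
    	if (rwxp & SVM_REQ_WRITE)
    		dev_err(dev, "unhandled write fault at %llx, pasid %d\n",
    			(unsigned long long)address, pasid);
    }

    static struct svm_dev_ops my_svm_ops = { .fault_cb = my_fault_cb };

    static int my_driver_bind(struct device *dev, int *pasid)
    {
    	if (!intel_svm_available(dev))
    		return -ENODEV;
    	return intel_svm_bind_mm(dev, pasid, 0, &my_svm_ops);
    }

    /* Later, once no requests are outstanding for the PASID: */
    /* intel_svm_unbind_mm(dev, pasid); */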
diff --git a/kernel/include/linux/interrupt.h b/kernel/include/linux/interrupt.h
index fe254555c..655cee096 100644
--- a/kernel/include/linux/interrupt.h
+++ b/kernel/include/linux/interrupt.h
@@ -208,6 +208,7 @@ extern void resume_device_irqs(void);
* @irq: Interrupt to which notification applies
* @kref: Reference count, for internal use
* @work: Work item, for internal use
+ * @list: List item for deferred callbacks
* @notify: Function to be called on change. This will be
* called in process context.
* @release: Function to be called on release. This will be
@@ -422,7 +423,8 @@ enum
BLOCK_IOPOLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ,
+ HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+ numbering. Sigh! */
 RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
NR_SOFTIRQS
@@ -463,6 +465,14 @@ extern void thread_do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
+#else
+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
+{
+ __raise_softirq_irqoff(nr);
+}
+#endif
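A small sketch of the intent behind this RT wrapper: with interrupts already disabled (as the _irqoff suffix requires), the caller raises the softirq, and on PREEMPT_RT_FULL the _ksoft variant defers it to the ksoftirqd thread rather than running it inline. TASKLET_SOFTIRQ is just an example vector; the surrounding code is illustrative.

    unsigned long flags;

    local_irq_save(flags);
    /* On PREEMPT_RT_FULL this wakes ksoftirqd; otherwise it is
     * exactly __raise_softirq_irqoff(). */
    __raise_softirq_irqoff_ksoft(TASKLET_SOFTIRQ);
    local_irq_restore(flags);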
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -611,10 +621,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
clockid_t which_clock, enum hrtimer_mode mode);
static inline
-int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
+void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+ const enum hrtimer_mode mode)
{
- return hrtimer_start(&ttimer->timer, time, mode);
+ hrtimer_start(&ttimer->timer, time, mode);
}
static inline
diff --git a/kernel/include/linux/io-64-nonatomic-hi-lo.h b/kernel/include/linux/io-64-nonatomic-hi-lo.h
new file mode 100644
index 000000000..11d7e840d
--- /dev/null
+++ b/kernel/include/linux/io-64-nonatomic-hi-lo.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_
+#define _LINUX_IO_64_NONATOMIC_HI_LO_H_
+
+#include <linux/io.h>
+#include <asm-generic/int-ll64.h>
+
+static inline __u64 hi_lo_readq(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ high = readl(p + 1);
+ low = readl(p);
+
+ return low + ((u64)high << 32);
+}
+
+static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
+{
+ writel(val >> 32, addr + 4);
+ writel(val, addr);
+}
+
+#ifndef readq
+#define readq hi_lo_readq
+#endif
+
+#ifndef writeq
+#define writeq hi_lo_writeq
+#endif
+
+#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/kernel/include/linux/io-64-nonatomic-lo-hi.h b/kernel/include/linux/io-64-nonatomic-lo-hi.h
new file mode 100644
index 000000000..1a4315f97
--- /dev/null
+++ b/kernel/include/linux/io-64-nonatomic-lo-hi.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_
+#define _LINUX_IO_64_NONATOMIC_LO_HI_H_
+
+#include <linux/io.h>
+#include <asm-generic/int-ll64.h>
+
+static inline __u64 lo_hi_readq(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ low = readl(p);
+ high = readl(p + 1);
+
+ return low + ((u64)high << 32);
+}
+
+static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+
+#ifndef readq
+#define readq lo_hi_readq
+#endif
+
+#ifndef writeq
+#define writeq lo_hi_writeq
+#endif
+
+#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
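The two headers above differ only in the order in which the 32-bit halves are accessed; a driver on a 32-bit platform picks whichever ordering its hardware tolerates by including one of them, after which plain readq()/writeq() work. A hedged sketch, with a made-up register offset:

    #include <linux/io-64-nonatomic-lo-hi.h>	/* low word first */

    static u64 my_read_counter(void __iomem *base)
    {
    	/* readq() expands to lo_hi_readq() where the architecture
    	 * provides no native readq(); the 0x10 offset is hypothetical. */
    	return readq(base + 0x10 /* MY_COUNTER_REG */);
    }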
diff --git a/kernel/include/linux/io-mapping.h b/kernel/include/linux/io-mapping.h
index c27dde721..e399029b6 100644
--- a/kernel/include/linux/io-mapping.h
+++ b/kernel/include/linux/io-mapping.h
@@ -21,7 +21,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/page.h>
/*
diff --git a/kernel/include/linux/io.h b/kernel/include/linux/io.h
index 986f2bffe..de64c1e53 100644
--- a/kernel/include/linux/io.h
+++ b/kernel/include/linux/io.h
@@ -19,10 +19,14 @@
#define _LINUX_IO_H
#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
+struct resource;
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -79,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);
+void *devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+void devm_memunmap(struct device *dev, void *addr);
+
+void *__devm_memremap_pages(struct device *dev, struct resource *res);
+
+#ifdef CONFIG_ZONE_DEVICE
+void *devm_memremap_pages(struct device *dev, struct resource *res);
+#else
+static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+ /*
+ * Fail attempts to call devm_memremap_pages() without
+ * ZONE_DEVICE support enabled; this requires callers to fall
+ * back to plain devm_memremap() based on config
+ */
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
@@ -111,6 +136,22 @@ static inline void arch_phys_wc_del(int handle)
}
#define arch_phys_wc_add arch_phys_wc_add
+#ifndef arch_phys_wc_index
+static inline int arch_phys_wc_index(int handle)
+{
+ return -1;
+}
+#define arch_phys_wc_index arch_phys_wc_index
#endif
+#endif
+
+enum {
+ /* See memremap() kernel-doc for usage description... */
+ MEMREMAP_WB = 1 << 0,
+ MEMREMAP_WT = 1 << 1,
+};
+
+void *memremap(resource_size_t offset, size_t size, unsigned long flags);
+void memunmap(void *addr);
#endif /* _LINUX_IO_H */
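A sketch of the new memremap() interface added above: request a write-back mapping of a physical range, falling back to write-through, and release it with memunmap(). The function name, physical offset and size are placeholders.

    static int my_map_buffer(void)
    {
    	void *va = memremap(0x80000000UL /* example phys */, 4096,
    			    MEMREMAP_WB | MEMREMAP_WT);

    	if (!va)
    		return -ENOMEM;
    	/* ... use va as ordinary memory, then: */
    	memunmap(va);
    	return 0;
    }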
diff --git a/kernel/include/linux/iommu-common.h b/kernel/include/linux/iommu-common.h
index bbced83b3..376a27c9c 100644
--- a/kernel/include/linux/iommu-common.h
+++ b/kernel/include/linux/iommu-common.h
@@ -7,6 +7,7 @@
#define IOMMU_POOL_HASHBITS 4
#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+#define IOMMU_ERROR_CODE (~(unsigned long) 0)
struct iommu_pool {
unsigned long start;
diff --git a/kernel/include/linux/iommu.h b/kernel/include/linux/iommu.h
index 0546b8710..f28dff313 100644
--- a/kernel/include/linux/iommu.h
+++ b/kernel/include/linux/iommu.h
@@ -81,6 +81,7 @@ struct iommu_domain {
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
+ void *iova_cookie;
};
enum iommu_cap {
@@ -114,6 +115,20 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/**
+ * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * @list: Linked list pointers
+ * @start: System physical start address of the region
+ * @length: Length of the region in bytes
+ * @prot: IOMMU Protection flags (READ/WRITE/...)
+ */
+struct iommu_dm_region {
+ struct list_head list;
+ phys_addr_t start;
+ size_t length;
+ int prot;
+};
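A hedged sketch of consuming these descriptors via the iommu_get_dm_regions()/iommu_put_dm_regions() helpers declared later in this diff; the loop body is illustrative.

    struct iommu_dm_region *region;
    LIST_HEAD(mappings);

    iommu_get_dm_regions(dev, &mappings);
    list_for_each_entry(region, &mappings, list) {
    	/* e.g. pre-map [start, start + length) with region->prot */
    }
    iommu_put_dm_regions(dev, &mappings);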
+
#ifdef CONFIG_IOMMU_API
/**
@@ -153,12 +168,16 @@ struct iommu_ops {
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
- int (*device_group)(struct device *dev, unsigned int *groupid);
+ struct iommu_group *(*device_group)(struct device *dev);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
+ /* Request/Free a list of direct mapping requirements for a device */
+ void (*get_dm_regions)(struct device *dev, struct list_head *list);
+ void (*put_dm_regions)(struct device *dev, struct list_head *list);
+
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
@@ -193,6 +212,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -204,6 +224,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
+extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern int iommu_request_dm_for_dev(struct device *dev);
+
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
@@ -227,6 +251,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
@@ -234,7 +259,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
- const char *fmt, ...);
+ const char *fmt, ...) __printf(4, 5);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
@@ -292,6 +317,11 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return domain->ops->map_sg(domain, iova, sg, nents, prot);
}
+/* PCI device grouping function */
+extern struct iommu_group *pci_device_group(struct device *dev);
+/* Generic device grouping function */
+extern struct iommu_group *generic_device_group(struct device *dev);
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -332,6 +362,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
{
}
+static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
+{
+ return NULL;
+}
+
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
@@ -373,6 +408,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
+static inline void iommu_get_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline void iommu_put_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline int iommu_request_dm_for_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
diff --git a/kernel/include/linux/ioport.h b/kernel/include/linux/ioport.h
index 388e3ae94..24bea087e 100644
--- a/kernel/include/linux/ioport.h
+++ b/kernel/include/linux/ioport.h
@@ -94,6 +94,7 @@ struct resource {
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
#define IORESOURCE_IO_FIXED (1<<1)
+#define IORESOURCE_IO_SPARSE (1<<2)
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
diff --git a/kernel/include/linux/iova.h b/kernel/include/linux/iova.h
index 3920a19d8..92f7177db 100644
--- a/kernel/include/linux/iova.h
+++ b/kernel/include/linux/iova.h
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
diff --git a/kernel/include/linux/ipmi_smi.h b/kernel/include/linux/ipmi_smi.h
index 0b1e569f5..f8cea1448 100644
--- a/kernel/include/linux/ipmi_smi.h
+++ b/kernel/include/linux/ipmi_smi.h
@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
implement it. */
void (*set_need_watch)(void *send_info, bool enable);
+ /*
+ * Called when flushing all pending messages.
+ */
+ void (*flush_messages)(void *send_info);
+
/* Called when the interface should go into "run to
completion" mode. If this call sets the value to true, the
interface should make sure that all messages are flushed
@@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
upper layer until the start_processing() function in the handlers
is called, and the lower layer must get the interface from that
call. */
-int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
diff --git a/kernel/include/linux/ipv6.h b/kernel/include/linux/ipv6.h
index 82806c60a..402753bcc 100644
--- a/kernel/include/linux/ipv6.h
+++ b/kernel/include/linux/ipv6.h
@@ -29,7 +29,9 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
+ __s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
bool initialized;
struct in6_addr secret;
} stable_secret;
+ __s32 use_oif_addrs_only;
void *sysctl;
};
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
struct inet6_skb_parm {
int iif;
__be16 ra;
- __u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
+#define IP6SKB_HOPBYHOP 32
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
@@ -224,7 +227,7 @@ struct ipv6_pinfo {
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist __rcu *ipv6_fl_list;
- struct ipv6_txoptions *opt;
+ struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
@@ -261,9 +264,9 @@ struct tcp6_timewait_sock {
};
#if IS_ENABLED(CONFIG_IPV6)
-static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
+static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
{
- return inet_sk(__sk)->pinet6;
+ return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
}
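The sk_fullsock() check means inet6_sk() now returns NULL for timewait and request sockets, which embed only the common mini-socket and have no pinet6. Callers therefore need a guard along these lines (a sketch, not from the patch):

    struct ipv6_pinfo *np = inet6_sk(sk);

    if (!np)
    	return;		/* timewait/request sock: no pinet6 */
    /* ... safe to dereference np ... */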
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
diff --git a/kernel/include/linux/irq.h b/kernel/include/linux/irq.h
index 38ecc0c4a..311d3f061 100644
--- a/kernel/include/linux/irq.h
+++ b/kernel/include/linux/irq.h
@@ -67,12 +67,13 @@ enum irqchip_irq_state;
* request/setup_irq()
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
- * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_NESTED_THREAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
* mechanism and from core side polling.
* IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -98,22 +99,23 @@ enum {
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
- IRQ_NO_SOFTIRQ_CALL = (1 << 19),
+ IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
/*
* Return value for chip->irq_set_affinity()
*
- * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendent irqchips.
@@ -128,47 +130,53 @@ struct msi_desc;
struct irq_domain;
/**
- * struct irq_data - per irq and irq chip data passed down to chip functions
+ * struct irq_common_data - per irq data shared by all irqchips
+ * @state_use_accessors: status information for irq chip functions.
+ * Use accessor functions to deal with it
+ * @node: node index useful for balancing
+ * @handler_data: per-IRQ data for the irq_chip methods
+ * @affinity: IRQ affinity on SMP
+ * @msi_desc: MSI descriptor
+ */
+struct irq_common_data {
+ unsigned int state_use_accessors;
+#ifdef CONFIG_NUMA
+ unsigned int node;
+#endif
+ void *handler_data;
+ struct msi_desc *msi_desc;
+ cpumask_var_t affinity;
+};
+
+/**
+ * struct irq_data - per irq chip data passed down to chip functions
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
- * @node: node index useful for balancing
- * @state_use_accessors: status information for irq chip functions.
- * Use accessor functions to deal with it
+ * @common: pointer to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
* @parent_data: pointer to parent struct irq_data to support hierarchy
* irq_domain
- * @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
- * @msi_desc: MSI descriptor
- * @affinity: IRQ affinity on SMP
- *
- * The fields here need to overlay the ones in irq_desc until we
- * cleaned up the direct references and switched everything over to
- * irq_data.
*/
struct irq_data {
u32 mask;
unsigned int irq;
unsigned long hwirq;
- unsigned int node;
- unsigned int state_use_accessors;
+ struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_data *parent_data;
#endif
- void *handler_data;
void *chip_data;
- struct msi_desc *msi_desc;
- cpumask_var_t affinity;
};
/*
- * Bit masks for irq_data.state
+ * Bit masks for irq_common_data.state_use_accessors
*
* IRQD_TRIGGER_MASK - Mask for the trigger type bits
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
@@ -184,6 +192,7 @@ struct irq_data {
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
+ * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -198,36 +207,39 @@ enum {
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
+ IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
+#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+ return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_PER_CPU;
+ return __irqd_to_state(d) & IRQD_PER_CPU;
}
static inline bool irqd_can_balance(struct irq_data *d)
{
- return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+ return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_AFFINITY_SET;
+ return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_AFFINITY_SET;
+ __irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_TRIGGER_MASK;
+ return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
@@ -235,59 +247,58 @@ static inline u32 irqd_get_trigger_type(struct irq_data *d)
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
- d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
- d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
}
static inline bool irqd_is_level_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_LEVEL;
+ return __irqd_to_state(d) & IRQD_LEVEL;
}
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_STATE;
+ return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+ return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}
static inline bool irqd_irq_disabled(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_DISABLED;
+ return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}
static inline bool irqd_irq_masked(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_MASKED;
+ return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}
static inline bool irqd_irq_inprogress(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+ return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+ return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
+static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
+}
-/*
- * Functions for chained handlers which can be enabled/disabled by the
- * standard disable_irq/enable_irq calls. Must be called with
- * irq_desc->lock held.
- */
-static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
+static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}
-static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
+static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
- d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -316,8 +327,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
- * @irq_suspend: function called from core code on suspend once per chip
- * @irq_resume: function called from core code on resume once per chip
+ * @irq_suspend: function called from core code on suspend once per
+ * chip, when one or more interrupts are installed
+ * @irq_resume: function called from core code on resume once per chip,
+ * when one or more interrupts are installed
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_calc_mask: Optional function to set irq_data.mask for special cases
* @irq_print_chip: optional to print special chip info in show_interrupts
@@ -329,6 +342,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_write_msi_msg: optional to write message content for MSI
* @irq_get_irqchip_state: return the internal state of an interrupt
 * @irq_set_irqchip_state: set the internal state of an interrupt
+ * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @flags: chip specific flags
*/
struct irq_chip {
@@ -371,6 +385,8 @@ struct irq_chip {
int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
+ int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+
unsigned long flags;
};
@@ -396,7 +412,6 @@ enum {
IRQCHIP_EOI_THREADED = (1 << 6),
};
-/* This include will go away once we isolated irq_desc usage to core code */
#include <linux/irqdesc.h>
/*
@@ -424,6 +439,9 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
+extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
+
+extern void irq_migrate_all_off_this_cpu(void);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
@@ -448,18 +466,20 @@ static inline int irq_set_parent(int irq, int parent_irq)
* Built-in IRQ handlers for various IRQ types,
* callable via desc->handle_irq()
*/
-extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(struct irq_desc *desc);
+extern void handle_fasteoi_irq(struct irq_desc *desc);
+extern void handle_edge_irq(struct irq_desc *desc);
+extern void handle_edge_eoi_irq(struct irq_desc *desc);
+extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_percpu_irq(struct irq_desc *desc);
+extern void handle_percpu_devid_irq(struct irq_desc *desc);
+extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_enable_parent(struct irq_data *data);
+extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
@@ -469,12 +489,13 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
+ void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
-extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret);
+extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
/* Enable/disable irq debugging output: */
@@ -520,6 +541,15 @@ irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
__irq_set_handler(irq, handle, 1, NULL);
}
+/*
+ * Set a highlevel chained flow handler and its data for a given IRQ.
+ * (a chained handler is automatically enabled and set to
+ * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
+ */
+void
+irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
+ void *data);
+
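A sketch of the new helper in use: a GPIO-expander style driver installs its demux handler and its private data on the parent interrupt in one call, avoiding the window where the handler could run before its data is set. The driver names are hypothetical; note the flow handler uses the new desc-only signature introduced in this series.

    static void my_gpio_demux(struct irq_desc *desc)
    {
    	struct my_gpio_chip *chip = irq_desc_get_handler_data(desc);

    	/* ack the parent, fan out to child interrupts ... */
    }

    /* At probe time (parent_irq and chip are driver-provided): */
    irq_set_chained_handler_and_data(parent_irq, my_gpio_demux, chip);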
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -602,23 +632,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
static inline void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->handler_data : NULL;
+ return d ? d->common->handler_data : NULL;
}
static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
- return d->handler_data;
+ return d->common->handler_data;
}
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->msi_desc : NULL;
+ return d ? d->common->msi_desc : NULL;
}
-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
- return d->msi_desc;
+ return d->common->msi_desc;
}
static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -627,6 +657,32 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
+static inline int irq_common_data_get_node(struct irq_common_data *d)
+{
+#ifdef CONFIG_NUMA
+ return d->node;
+#else
+ return 0;
+#endif
+}
+
+static inline int irq_data_get_node(struct irq_data *d)
+{
+ return irq_common_data_get_node(d->common);
+}
+
+static inline struct cpumask *irq_get_affinity_mask(int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? d->common->affinity : NULL;
+}
+
+static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+ return d->common->affinity;
+}
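A hedged sketch of the new accessors from an irqchip callback: per-IRQ state now lives behind irq_data->common, so the NUMA node and affinity mask are reached through helpers rather than direct fields.

    static int my_chip_set_affinity(struct irq_data *d,
    				    const struct cpumask *mask, bool force)
    {
    	int node = irq_data_get_node(d);	/* NUMA-aware alloc hint */
    	struct cpumask *aff = irq_data_get_affinity_mask(d);

    	/* program hardware for 'mask', consulting 'node' and 'aff' ... */
    	return IRQ_SET_MASK_OK;
    }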
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
@@ -721,6 +777,12 @@ struct irq_chip_type {
* @reg_base: Register base address (virtual)
* @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
* @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
+ * @suspend: Function called from core code on suspend once per
+ * chip; can be useful instead of irq_chip::suspend to
+ * handle chip details even when no interrupts are in use
+ * @resume: Function called from core code on resume once per chip;
+ * can be useful instead of irq_chip::resume to handle
+ * chip details even when no interrupts are in use
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
@@ -747,6 +809,8 @@ struct irq_chip_generic {
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
+ void (*suspend)(struct irq_chip_generic *gc);
+ void (*resume)(struct irq_chip_generic *gc);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
diff --git a/kernel/include/linux/irq_work.h b/kernel/include/linux/irq_work.h
index 0e427a999..2543aab05 100644
--- a/kernel/include/linux/irq_work.h
+++ b/kernel/include/linux/irq_work.h
@@ -52,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void);
+#else
+static inline void irq_work_tick_soft(void) { }
+#endif
+
#endif /* _LINUX_IRQ_WORK_H */
diff --git a/kernel/include/linux/irqbypass.h b/kernel/include/linux/irqbypass.h
new file mode 100644
index 000000000..1551b5b2f
--- /dev/null
+++ b/kernel/include/linux/irqbypass.h
@@ -0,0 +1,90 @@
+/*
+ * IRQ offload/bypass manager
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ * Copyright (c) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef IRQBYPASS_H
+#define IRQBYPASS_H
+
+#include <linux/list.h>
+
+struct irq_bypass_consumer;
+
+/*
+ * Theory of operation
+ *
+ * The IRQ bypass manager is a simple set of lists and callbacks that allows
+ * IRQ producers (ex. physical interrupt sources) to be matched to IRQ
+ * consumers (ex. virtualization hardware that allows IRQ bypass or offload)
+ * via a shared token (ex. eventfd_ctx). Producers and consumers register
+ * independently. When a token match is found, the optional @stop callback
+ * will be called for each participant. The pair will then be connected via
+ * the @add_* callbacks, and finally the optional @start callback will allow
+ * any final coordination. When either participant is unregistered, the
+ * process is repeated using the @del_* callbacks in place of the @add_*
+ * callbacks. Match tokens must be unique per producer/consumer; 1:N pairings
+ * are not supported.
+ */
+
+/**
+ * struct irq_bypass_producer - IRQ bypass producer definition
+ * @node: IRQ bypass manager private list management
+ * @token: opaque token to match between producer and consumer
+ * @irq: Linux IRQ number for the producer device
+ * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
+ * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
+ * @stop: Perform any quiesce operations necessary prior to add/del (optional)
+ * @start: Perform any startup operations necessary after add/del (optional)
+ *
+ * The IRQ bypass producer structure represents an interrupt source for
+ * participation in possible host bypass, for instance an interrupt vector
+ * for a physical device assigned to a VM.
+ */
+struct irq_bypass_producer {
+ struct list_head node;
+ void *token;
+ int irq;
+ int (*add_consumer)(struct irq_bypass_producer *,
+ struct irq_bypass_consumer *);
+ void (*del_consumer)(struct irq_bypass_producer *,
+ struct irq_bypass_consumer *);
+ void (*stop)(struct irq_bypass_producer *);
+ void (*start)(struct irq_bypass_producer *);
+};
+
+/**
+ * struct irq_bypass_consumer - IRQ bypass consumer definition
+ * @node: IRQ bypass manager private list management
+ * @token: opaque token to match between producer and consumer
+ * @add_producer: Connect the IRQ consumer to an IRQ producer
+ * @del_producer: Disconnect the IRQ consumer from an IRQ producer
+ * @stop: Perform any quiesce operations necessary prior to add/del (optional)
+ * @start: Perform any startup operations necessary after add/del (optional)
+ *
+ * The IRQ bypass consumer structure represents an interrupt sink for
+ * participation in possible host bypass, for instance a hypervisor may
+ * support offloads to allow bypassing the host entirely or offload
+ * portions of the interrupt handling to the VM.
+ */
+struct irq_bypass_consumer {
+ struct list_head node;
+ void *token;
+ int (*add_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ void (*del_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ void (*stop)(struct irq_bypass_consumer *);
+ void (*start)(struct irq_bypass_consumer *);
+};
+
+int irq_bypass_register_producer(struct irq_bypass_producer *);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+
+#endif /* IRQBYPASS_H */
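A sketch of the registration flow described in the theory-of-operation comment above: a producer and a consumer that share a token (an eventfd context, say) are paired by the manager, which invokes the stop/add/start callbacks around the connection. Everything except the irq_bypass_* API itself is hypothetical.

    static struct irq_bypass_producer my_producer;

    static int my_assign_irq(void *token, int irq)
    {
    	my_producer.token = token;	/* e.g. an eventfd_ctx pointer */
    	my_producer.irq = irq;
    	/* optional callbacks (stop/start/add_consumer) omitted */
    	return irq_bypass_register_producer(&my_producer);
    }

    /* Teardown: irq_bypass_unregister_producer(&my_producer); */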
diff --git a/kernel/include/linux/irqchip.h b/kernel/include/linux/irqchip.h
index 14d79131f..89c34b200 100644
--- a/kernel/include/linux/irqchip.h
+++ b/kernel/include/linux/irqchip.h
@@ -11,6 +11,37 @@
#ifndef _LINUX_IRQCHIP_H
#define _LINUX_IRQCHIP_H
+#include <linux/acpi.h>
+#include <linux/of.h>
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
+ * same file.
+ * @compat: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their version and their initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the
+ * same file.
+ * @subtable: Subtable to be identified in MADT
+ * @validate: Function to be called on that subtable to check its validity.
+ * Can be NULL.
+ * @data: data to be checked by the validate function.
+ * @fn: initialization function
+ */
+#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \
+ ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \
+ subtable, validate, data, fn)
+
#ifdef CONFIG_IRQCHIP
void irqchip_init(void);
#else
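A sketch of IRQCHIP_DECLARE() as a driver would use it: the macro ties a DT compatible string to an init function so the core can probe the irqchip from the device tree. The driver name and compatible string are invented.

    static int __init my_intc_of_init(struct device_node *node,
    				      struct device_node *parent)
    {
    	/* ioremap registers, create the irq domain, etc. */
    	return 0;
    }
    IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_of_init);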
diff --git a/kernel/include/linux/irqchip/arm-gic-acpi.h b/kernel/include/linux/irqchip/arm-gic-acpi.h
deleted file mode 100644
index de3419ed3..000000000
--- a/kernel/include/linux/irqchip/arm-gic-acpi.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2014, Linaro Ltd.
- * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ARM_GIC_ACPI_H_
-#define ARM_GIC_ACPI_H_
-
-#ifdef CONFIG_ACPI
-
-/*
- * Hard code here, we can not get memory size from MADT (but FDT does),
- * Actually no need to do that, because this size can be inferred
- * from GIC spec.
- */
-#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
-#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
-
-struct acpi_table_header;
-
-int gic_v2_acpi_init(struct acpi_table_header *table);
-void acpi_gic_init(void);
-#else
-static inline void acpi_gic_init(void) { }
-#endif
-
-#endif /* ARM_GIC_ACPI_H_ */
diff --git a/kernel/include/linux/irqchip/arm-gic-v3.h b/kernel/include/linux/irqchip/arm-gic-v3.h
index ffbc034c8..d5d798b35 100644
--- a/kernel/include/linux/irqchip/arm-gic-v3.h
+++ b/kernel/include/linux/irqchip/arm-gic-v3.h
@@ -18,8 +18,6 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
#define __LINUX_IRQCHIP_ARM_GIC_V3_H
-#include <asm/sysreg.h>
-
/*
* Distributor registers. We assume we're running non-secure, with ARE
* being set. Secure-only and non-ARE registers are not described.
@@ -104,6 +102,8 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_ISACTIVER GICD_ISACTIVER
+#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
@@ -229,6 +229,7 @@
#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_TYPE_NONE 0
#define GITS_BASER_TYPE_DEVICE 1
@@ -264,13 +265,16 @@
/*
* Hypervisor interface registers (SRE only)
*/
-#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
+#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
-#define ICH_LR_EOI (1UL << 41)
-#define ICH_LR_GROUP (1UL << 60)
-#define ICH_LR_STATE (3UL << 62)
-#define ICH_LR_PENDING_BIT (1UL << 62)
-#define ICH_LR_ACTIVE_BIT (1UL << 63)
+#define ICH_LR_EOI (1ULL << 41)
+#define ICH_LR_GROUP (1ULL << 60)
+#define ICH_LR_HW (1ULL << 61)
+#define ICH_LR_STATE (3ULL << 62)
+#define ICH_LR_PENDING_BIT (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT (1ULL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
@@ -287,18 +291,8 @@
#define ICH_VMCR_PMR_SHIFT 24
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
-#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
-#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
-#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
-#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
-#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
-#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
-
#define ICC_IAR1_EL1_SPURIOUS 0x3ff
-#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-
#define ICC_SRE_EL2_SRE (1 << 0)
#define ICC_SRE_EL2_ENABLE (1 << 3)
@@ -314,53 +308,10 @@
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
-/*
- * System register definitions
- */
-#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
-#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
-#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
-#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
-#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
-#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
-#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
-
-#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
-#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
-
-#define ICH_LR0_EL2 __LR0_EL2(0)
-#define ICH_LR1_EL2 __LR0_EL2(1)
-#define ICH_LR2_EL2 __LR0_EL2(2)
-#define ICH_LR3_EL2 __LR0_EL2(3)
-#define ICH_LR4_EL2 __LR0_EL2(4)
-#define ICH_LR5_EL2 __LR0_EL2(5)
-#define ICH_LR6_EL2 __LR0_EL2(6)
-#define ICH_LR7_EL2 __LR0_EL2(7)
-#define ICH_LR8_EL2 __LR8_EL2(0)
-#define ICH_LR9_EL2 __LR8_EL2(1)
-#define ICH_LR10_EL2 __LR8_EL2(2)
-#define ICH_LR11_EL2 __LR8_EL2(3)
-#define ICH_LR12_EL2 __LR8_EL2(4)
-#define ICH_LR13_EL2 __LR8_EL2(5)
-#define ICH_LR14_EL2 __LR8_EL2(6)
-#define ICH_LR15_EL2 __LR8_EL2(7)
-
-#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
-#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
-#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
-#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
-#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
-
-#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
-#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
-#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
-#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
-#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
+#include <asm/arch_gicv3.h>
#ifndef __ASSEMBLY__
-#include <linux/stringify.h>
-
/*
 * We need a value to serve as an irq-type for LPIs. Choose one that will
* hopefully pique the interest of the reviewer.
@@ -378,17 +329,27 @@ struct rdists {
u64 flags;
};
-static inline void gic_write_eoir(u64 irq)
-{
- asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
- isb();
-}
-
struct irq_domain;
+struct device_node;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
struct irq_domain *domain);
+static inline bool gic_enable_sre(void)
+{
+ u32 val;
+
+ val = gic_read_sre();
+ if (val & ICC_SRE_EL1_SRE)
+ return true;
+
+ val |= ICC_SRE_EL1_SRE;
+ gic_write_sre(val);
+ val = gic_read_sre();
+
+ return !!(val & ICC_SRE_EL1_SRE);
+}
+
#endif
#endif
diff --git a/kernel/include/linux/irqchip/arm-gic.h b/kernel/include/linux/irqchip/arm-gic.h
index 9de976b4f..bae69e5d6 100644
--- a/kernel/include/linux/irqchip/arm-gic.h
+++ b/kernel/include/linux/irqchip/arm-gic.h
@@ -20,9 +20,13 @@
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
+#define GIC_CPU_DEACTIVATE 0x1000
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0
+
+#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
+
#define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023
#define GICC_DIS_BYPASS_MASK 0x1e0
@@ -71,11 +75,12 @@
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
-#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_CTRL_SHIFT 0
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
@@ -95,17 +100,11 @@
struct device_node;
-void gic_set_irqchip_flags(unsigned long flags);
-void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
- u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_cpu_if_down(void);
+int gic_cpu_if_down(unsigned int gic_nr);
-static inline void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu)
-{
- gic_init_bases(nr, start, dist, cpu, 0, NULL);
-}
+void gic_init(unsigned int nr, int start,
+ void __iomem *dist, void __iomem *cpu);
int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
diff --git a/kernel/include/linux/irqchip/ingenic.h b/kernel/include/linux/irqchip/ingenic.h
new file mode 100644
index 000000000..0ee319a40
--- /dev/null
+++ b/kernel/include/linux/irqchip/ingenic.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_IRQCHIP_INGENIC_H__
+#define __LINUX_IRQCHIP_INGENIC_H__
+
+#include <linux/irq.h>
+
+extern void ingenic_intc_irq_suspend(struct irq_data *data);
+extern void ingenic_intc_irq_resume(struct irq_data *data);
+
+#endif
diff --git a/kernel/include/linux/irqchip/irq-sa11x0.h b/kernel/include/linux/irqchip/irq-sa11x0.h
new file mode 100644
index 000000000..15db6829c
--- /dev/null
+++ b/kernel/include/linux/irqchip/irq-sa11x0.h
@@ -0,0 +1,17 @@
+/*
+ * Generic IRQ handling for the SA11x0.
+ *
+ * Copyright (C) 2015 Dmitry Eremin-Solenikov
+ * Copyright (C) 1999-2001 Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
+#define __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
+
+void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start);
+
+#endif
diff --git a/kernel/include/linux/irqchip/mips-gic.h b/kernel/include/linux/irqchip/mips-gic.h
index 9b1ad3734..ce824db48 100644
--- a/kernel/include/linux/irqchip/mips-gic.h
+++ b/kernel/include/linux/irqchip/mips-gic.h
@@ -9,6 +9,7 @@
#define __LINUX_IRQCHIP_MIPS_GIC_H
#include <linux/clocksource.h>
+#include <linux/ioport.h>
#define GIC_MAX_INTRS 256
@@ -41,12 +42,20 @@
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
+/* 64-bit counter register for CM3 */
+#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
#define GIC_SH_COUNTER_63_32_OFS 0x0014
#define GIC_SH_REVISIONID_OFS 0x0020
/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
-#define GIC_INTR_BIT(intr) ((intr) % 32)
+#define GIC_INTR_OFS(intr) ({ \
+ unsigned bits = mips_cm_is64 ? 64 : 32; \
+ unsigned reg_idx = (intr) / bits; \
+ unsigned reg_width = bits / 8; \
+ \
+ reg_idx * reg_width; \
+})
+#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
/* Polarity : Reset Value is always 0 */
#define GIC_SH_SET_POLARITY_OFS 0x0100
@@ -98,6 +107,8 @@
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
+/* 64-bit Compare register on CM3 */
+#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
#define GIC_VPE_COMPARE_HI_OFS 0x00a4
#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
@@ -235,6 +246,8 @@
#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
+#ifdef CONFIG_MIPS_GIC
+
extern unsigned int gic_present;
extern void gic_init(unsigned long gic_base_addr,
@@ -254,4 +267,18 @@ extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
extern int gic_get_c0_fdc_int(void);
+extern int gic_get_usm_range(struct resource *gic_usm_res);
+
+#else /* CONFIG_MIPS_GIC */
+
+#define gic_present 0
+
+static inline int gic_get_usm_range(struct resource *gic_usm_res)
+{
+ /* Shouldn't be called. */
+ return -1;
+}
+
+#endif /* CONFIG_MIPS_GIC */
+
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
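With CM3 the shared GIC registers are 64 bits wide, so GIC_INTR_OFS()/GIC_INTR_BIT() now derive the word offset and bit position from the runtime register width instead of assuming 32-bit words. A sketch using the polarity register shown above (mips_cm_is64 comes from the CM code; the interrupt number is an example):

    unsigned int intr = 42;	/* example shared interrupt */
    unsigned long ofs = GIC_SH_SET_POLARITY_OFS + GIC_INTR_OFS(intr);
    unsigned int bit = GIC_INTR_BIT(intr);
    /* 32-bit CM: 4-byte words, bits 0-31; CM3: 8-byte words, bits 0-63. */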
diff --git a/kernel/include/linux/irqdesc.h b/kernel/include/linux/irqdesc.h
index 9d97cd5bb..ad57402a2 100644
--- a/kernel/include/linux/irqdesc.h
+++ b/kernel/include/linux/irqdesc.h
@@ -3,9 +3,6 @@
/*
* Core internal functions to deal with irq descriptors
- *
- * This include will move to kernel/irq once we cleaned up the tree.
- * For now it's included from <linux/irq.h>
*/
struct irq_affinity_notify;
@@ -17,7 +14,7 @@ struct pt_regs;
/**
* struct irq_desc - interrupt descriptor
- * @irq_data: per irq and chip data passed down to chip functions
+ * @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
* @preflow_handler: handler called before the flow handler (currently used by sparc)
@@ -47,6 +44,7 @@ struct pt_regs;
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
+ struct irq_common_data irq_common_data;
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
@@ -90,10 +88,25 @@ struct irq_desc {
const char *name;
} ____cacheline_internodealigned_in_smp;
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
+{
+ return container_of(data->common, struct irq_desc, irq_common_data);
+}
+
+static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
+{
+ return desc->irq_data.irq;
+}
+
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
@@ -111,23 +124,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
- return desc->irq_data.handler_data;
+ return desc->irq_common_data.handler_data;
}
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
- return desc->irq_data.msi_desc;
+ return desc->irq_common_data.msi_desc;
}
/*
* Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ * handle an interrupt.
*/
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
- desc->handle_irq(irq, desc);
+ desc->handle_irq(desc);
}
int generic_handle_irq(unsigned int irq);
@@ -150,33 +161,55 @@ static inline int handle_domain_irq(struct irq_domain *domain,
#endif
/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
+static inline int irq_desc_has_action(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
-/* caller has locked the irq_desc and both params are valid */
-static inline void __irq_set_handler_locked(unsigned int irq,
- irq_flow_handler_t handler)
+static inline int irq_has_action(unsigned int irq)
{
- struct irq_desc *desc;
+ return irq_desc_has_action(irq_to_desc(irq));
+}
+
+/**
+ * irq_set_handler_locked - Set irq handler from a locked region
+ * @data: Pointer to the irq_data structure which identifies the irq
+ * @handler: Flow control handler function for this interrupt
+ *
+ * Sets the handler in the irq descriptor associated with @data.
+ *
+ * Must be called with irq_desc locked and valid parameters. Typical
+ * call site is the irq_set_type() callback.
+ */
+static inline void irq_set_handler_locked(struct irq_data *data,
+ irq_flow_handler_t handler)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
- desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
-/* caller has locked the irq_desc and both params are valid */
+/**
+ * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
+ * @data: Pointer to the irq_data structure for which the chip is set
+ * @chip: Pointer to the new irq chip
+ * @handler: Flow control handler function for this interrupt
+ * @name: Name of the interrupt
+ *
+ * Replaces the irq chip at the proper hierarchy level in @data and
+ * sets the handler and name in the associated irq descriptor.
+ *
+ * Must be called with irq_desc locked and valid parameters.
+ */
static inline void
-__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handler, const char *name)
+irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+ irq_flow_handler_t handler, const char *name)
{
- struct irq_desc *desc;
+ struct irq_desc *desc = irq_data_to_desc(data);
- desc = irq_to_desc(irq);
- irq_desc_get_irq_data(desc)->chip = chip;
desc->handle_irq = handler;
desc->name = name;
+ data->chip = chip;
}
static inline int irq_balancing_disabled(unsigned int irq)
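
The new locked helpers take an irq_data pointer rather than an irq number, which is exactly what an irq_set_type() callback already holds. A hedged sketch of the typical call site (the driver name and the trigger programming are illustrative):

    static int foo_irq_set_type(struct irq_data *data, unsigned int type)
    {
    	/* the irq_desc is already locked here, so the _locked helpers are safe */
    	if (type & IRQ_TYPE_EDGE_BOTH)
    		irq_set_handler_locked(data, handle_edge_irq);
    	else
    		irq_set_handler_locked(data, handle_level_irq);

    	/* ... program the trigger into (hypothetical) hardware ... */
    	return 0;
    }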
diff --git a/kernel/include/linux/irqdomain.h b/kernel/include/linux/irqdomain.h
index 676d7306a..d5e5c5bef 100644
--- a/kernel/include/linux/irqdomain.h
+++ b/kernel/include/linux/irqdomain.h
@@ -5,9 +5,10 @@
* helpful for interrupt controllers to implement mapping between hardware
* irq numbers and the Linux irq number space.
*
- * irq_domains also have a hook for translating device tree interrupt
- * representation into a hardware irq number that can be mapped back to a
- * Linux irq number without any extra platform support code.
+ * irq_domains also have hooks for translating device tree or other
+ * firmware interrupt representations into a hardware irq number that
+ * can be mapped back to a Linux irq number without any extra platform
+ * support code.
*
* Interrupt controller "domain" data structure. This could be defined as a
* irq domain controller. That is, it handles the mapping between hardware
@@ -17,16 +18,12 @@
* model). It's the domain callbacks that are responsible for setting the
* irq_chip on a given irq_desc after it's been mapped.
*
- * The host code and data structures are agnostic to whether or not
- * we use an open firmware device-tree. We do have references to struct
- * device_node in two places: in irq_find_host() to find the host matching
- * a given interrupt controller node, and of course as an argument to its
- * counterpart domain->ops->match() callback. However, those are treated as
- * generic pointers by the core and the fact that it's actually a device-node
- * pointer is purely a convention between callers and implementation. This
- * code could thus be used on other architectures by replacing those two
- * by some sort of arch-specific void * "token" used to identify interrupt
- * controllers.
+ * The host code and data structures use a fwnode_handle pointer to
+ * identify the domain. In some cases, and in order to preserve source
+ * code compatibility, this fwnode pointer is "upgraded" to a DT
+ * device_node. For those firmware infrastructures that do not provide
+ * a unique identifier for an interrupt controller, the irq_domain
+ * code offers a fwnode allocator.
*/
#ifndef _LINUX_IRQDOMAIN_H
@@ -34,6 +31,7 @@
#include <linux/types.h>
#include <linux/irqhandler.h>
+#include <linux/of.h>
#include <linux/radix-tree.h>
struct device_node;
@@ -45,6 +43,38 @@ struct irq_data;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
+#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
+
+/**
+ * struct irq_fwspec - generic IRQ specifier structure
+ *
+ * @fwnode: Pointer to a firmware-specific descriptor
+ * @param_count: Number of device-specific parameters
+ * @param: Device-specific parameters
+ *
+ * This structure, directly modeled after of_phandle_args, is used to
+ * pass a device-specific description of an interrupt.
+ */
+struct irq_fwspec {
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+};
+
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+};
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +91,8 @@ struct irq_data;
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -76,6 +107,8 @@ struct irq_domain_ops {
unsigned int nr_irqs);
void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
#endif
};
@@ -115,7 +148,8 @@ struct irq_domain {
unsigned int flags;
/* Optional data */
- struct device_node *of_node;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
@@ -145,8 +179,15 @@ enum {
IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
};
+static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
+{
+ return to_of_node(d->fwnode);
+}
+
#ifdef CONFIG_IRQ_DOMAIN
-struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+struct fwnode_handle *irq_domain_alloc_fwnode(void *data);
+void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
@@ -161,9 +202,26 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_host(struct device_node *node);
+extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
+{
+ return node ? &node->fwnode : NULL;
+}
+
+static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+}
+
+static inline struct irq_domain *irq_find_host(struct device_node *node)
+{
+ return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+}
+
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
@@ -176,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
const struct irq_domain_ops *ops,
void *host_data)
{
- return __irq_domain_add(of_node, size, size, 0, ops, host_data);
+ return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
}
static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
void *host_data)
{
- return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data);
+ return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
}
static inline struct irq_domain *irq_domain_add_legacy_isa(
struct device_node *of_node,
@@ -197,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node
const struct irq_domain_ops *ops,
void *host_data)
{
- return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data);
+ return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
+}
+
+static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
+}
+
+static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
}
extern void irq_domain_remove(struct irq_domain *host);
@@ -212,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain,
extern unsigned int irq_create_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
+extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
extern void irq_dispose_mapping(unsigned int virq);
/**
@@ -258,11 +332,28 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
- struct device_node *node,
+ struct fwnode_handle *fwnode,
const struct irq_domain_ops *ops, void *host_data);
+
+static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+ unsigned int flags,
+ unsigned int size,
+ struct device_node *node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return irq_domain_create_hierarchy(parent, flags, size,
+ of_node_to_fwnode(node),
+ ops, host_data);
+}
+
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc);
@@ -281,10 +372,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_chip *chip,
void *chip_data);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
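
For callers, the visible effect of the fwnode rework is that an interrupt can be described by an irq_fwspec and mapped without DT-specific code. A hedged sketch following the common two-cell "hwirq, trigger" convention; the wrapper function and the trigger choice are assumptions:

    static unsigned int map_wired_irq(struct fwnode_handle *fwnode, u32 hwirq)
    {
    	struct irq_fwspec fwspec = {
    		.fwnode      = fwnode,	/* identifies the irqchip's domain */
    		.param_count = 2,
    		.param       = { hwirq, IRQ_TYPE_LEVEL_HIGH },
    	};

    	/* returns a Linux virq, or 0 on failure */
    	return irq_create_fwspec_mapping(&fwspec);
    }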
diff --git a/kernel/include/linux/irqhandler.h b/kernel/include/linux/irqhandler.h
index 62d543004..661bed0ed 100644
--- a/kernel/include/linux/irqhandler.h
+++ b/kernel/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
struct irq_desc;
struct irq_data;
-typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
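
Handlers that still need the irq number after this signature change recover it from the descriptor. A minimal sketch of a chained demux handler under the new typedef:

    static void foo_demux_handler(struct irq_desc *desc)
    {
    	/* the number is no longer passed in; fetch it when needed */
    	unsigned int irq = irq_desc_get_irq(desc);

    	pr_debug("demuxing parent irq %u\n", irq);
    	/* ... read a hypothetical status register and invoke children ... */
    }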
diff --git a/kernel/include/linux/irqnr.h b/kernel/include/linux/irqnr.h
index fdd5cc16c..9669bf9d4 100644
--- a/kernel/include/linux/irqnr.h
+++ b/kernel/include/linux/irqnr.h
@@ -23,12 +23,6 @@ unsigned int irq_get_next_irq(unsigned int offset);
; \
else
-#ifdef CONFIG_SMP
-#define irq_node(irq) (irq_get_irq_data(irq)->node)
-#else
-#define irq_node(irq) 0
-#endif
-
# define for_each_active_irq(irq) \
for (irq = irq_get_next_irq(0); irq < nr_irqs; \
irq = irq_get_next_irq(irq + 1))
diff --git a/kernel/include/linux/irqreturn.h b/kernel/include/linux/irqreturn.h
index e374e369f..eb1bdcf95 100644
--- a/kernel/include/linux/irqreturn.h
+++ b/kernel/include/linux/irqreturn.h
@@ -3,7 +3,7 @@
/**
* enum irqreturn
- * @IRQ_NONE interrupt was not from this device
+ * @IRQ_NONE interrupt was not from this device or was not handled
* @IRQ_HANDLED interrupt was handled by this device
* @IRQ_WAKE_THREAD handler requests to wake the handler thread
*/
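
The clarified @IRQ_NONE wording reflects how handlers, especially on shared lines, are expected to behave: return IRQ_NONE whenever nothing was serviced, so the core can detect spurious interrupts. A hedged sketch with a made-up device and status register:

    static irqreturn_t foo_isr(int irq, void *dev_id)
    {
    	struct foo_dev *foo = dev_id;			/* hypothetical device */
    	u32 status = readl(foo->regs + FOO_INT_STATUS);	/* hypothetical reg */

    	if (!status)
    		return IRQ_NONE;	/* not ours, or nothing to handle */

    	writel(status, foo->regs + FOO_INT_ACK);
    	return IRQ_HANDLED;
    }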
diff --git a/kernel/include/linux/jbd.h b/kernel/include/linux/jbd.h
deleted file mode 100644
index d32615280..000000000
--- a/kernel/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs. Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks. By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
- unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
- __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...) /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
- return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
- free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle. Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process. To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time. When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s handle_t; /* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s journal_t; /* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK 1
-#define JFS_COMMIT_BLOCK 2
-#define JFS_SUPERBLOCK_V1 3
-#define JFS_SUPERBLOCK_V2 4
-#define JFS_REVOKE_BLOCK 5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
- __be32 h_magic;
- __be32 h_blocktype;
- __be32 h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
- __be32 t_blocknr; /* The on-disk block number */
- __be32 t_flags; /* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
- journal_header_t r_header;
- __be32 r_count; /* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
-#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
-
-
-/*
- * The journal superblock. All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
- journal_header_t s_header;
-
-/* 0x000C */
- /* Static information describing the journal */
- __be32 s_blocksize; /* journal device blocksize */
- __be32 s_maxlen; /* total blocks in journal file */
- __be32 s_first; /* first block of log information */
-
-/* 0x0018 */
- /* Dynamic information describing the current state of the log */
- __be32 s_sequence; /* first commit ID expected in log */
- __be32 s_start; /* blocknr of start of log */
-
-/* 0x0020 */
- /* Error value, as set by journal_abort(). */
- __be32 s_errno;
-
-/* 0x0024 */
- /* Remaining fields are only valid in a version-2 superblock */
- __be32 s_feature_compat; /* compatible feature set */
- __be32 s_feature_incompat; /* incompatible feature set */
- __be32 s_feature_ro_compat; /* readonly-compatible feature set */
-/* 0x0030 */
- __u8 s_uuid[16]; /* 128-bit uuid for journal */
-
-/* 0x0040 */
- __be32 s_nr_users; /* Nr of filesystems sharing log */
-
- __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
- __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
- __be32 s_max_trans_data; /* Limit of data blocks per trans. */
-
-/* 0x0050 */
- __u32 s_padding[44];
-
-/* 0x0100 */
- __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES 0
-#define JFS_KNOWN_ROCOMPAT_FEATURES 0
-#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
- BH_JBD /* Has an attached ext3 journal_head */
- = BH_PrivateStart,
- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
- BH_Freed, /* Has been freed (truncated) */
- BH_Revoked, /* Has been revoked from the log */
- BH_RevokeValid, /* Revoked flag is valid */
- BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
- BH_JBDPrivateStart, /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert) BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...) J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...) \
- ({ \
- int val = (expr); \
- if (!val) { \
- printk(KERN_ERR \
- "EXT3-fs unexpected failure: %s;\n",# expr); \
- printk(KERN_ERR why "\n"); \
- } \
- val; \
- })
-#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
- /* Which compound transaction is this update a part of? */
- transaction_t *h_transaction;
-
- /* Number of remaining buffers we are allowed to dirty: */
- int h_buffer_credits;
-
- /* Reference count on this handle */
- int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
- int h_err;
-
- /* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_aborted: 1; /* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism. It
- * tracks a compound transaction through its various states:
- *
- * RUNNING: accepting new updates
- * LOCKED: Updates still running but we don't accept new ones
- * RUNDOWN: Updates are tidying up but have finished requesting
- * new buffers to modify (state not used for now)
- * FLUSH: All updates complete, but we are still writing to disk
- * COMMIT: All data on disk, writing commit record
- * FINISHED: We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- * j_list_lock
- * ->jbd_lock_bh_journal_head() (This is "innermost")
- *
- * j_state_lock
- * ->jbd_lock_bh_state()
- *
- * jbd_lock_bh_state()
- * ->j_list_lock
- *
- * j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
- * ->j_list_lock (journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
- /* Pointer to the journal for this transaction. [no locking] */
- journal_t *t_journal;
-
- /* Sequence number for this transaction [no locking] */
- tid_t t_tid;
-
- /*
- * Transaction's current state
- * [no locking - only kjournald alters this]
- * [j_list_lock] guards transition of a transaction into T_FINISHED
- * state and subsequent call of __journal_drop_transaction()
- * FIXME: needs barriers
- * KLUDGE: [use j_state_lock]
- */
- enum {
- T_RUNNING,
- T_LOCKED,
- T_FLUSH,
- T_COMMIT,
- T_COMMIT_RECORD,
- T_FINISHED
- } t_state;
-
- /*
- * Where in the log does this transaction's commit start? [no locking]
- */
- unsigned int t_log_start;
-
- /* Number of buffers on the t_buffers list [j_list_lock] */
- int t_nr_buffers;
-
- /*
- * Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
- */
- struct journal_head *t_reserved_list;
-
- /*
- * Doubly-linked circular list of all buffers under writeout during
- * commit [j_list_lock]
- */
- struct journal_head *t_locked_list;
-
- /*
- * Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
- */
- struct journal_head *t_buffers;
-
- /*
- * Doubly-linked circular list of all data buffers still to be
- * flushed before this transaction can be committed [j_list_lock]
- */
- struct journal_head *t_sync_datalist;
-
- /*
- * Doubly-linked circular list of all forget buffers (superseded
- * buffers which we can un-checkpoint once this transaction commits)
- * [j_list_lock]
- */
- struct journal_head *t_forget;
-
- /*
- * Doubly-linked circular list of all buffers still to be flushed before
- * this transaction can be checkpointed. [j_list_lock]
- */
- struct journal_head *t_checkpoint_list;
-
- /*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
- * Doubly-linked circular list of temporary buffers currently undergoing
- * IO in the log [j_list_lock]
- */
- struct journal_head *t_iobuf_list;
-
- /*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
- */
- struct journal_head *t_shadow_list;
-
- /*
- * Doubly-linked circular list of control buffers being written to the
- * log. [j_list_lock]
- */
- struct journal_head *t_log_list;
-
- /*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
- * Number of outstanding updates running on this transaction
- * [t_handle_lock]
- */
- int t_updates;
-
- /*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
- */
- int t_outstanding_credits;
-
- /*
- * Forward and backward links for the circular list of all transactions
- * awaiting checkpoint. [j_list_lock]
- */
- transaction_t *t_cpnext, *t_cpprev;
-
- /*
- * When will the transaction expire (become due for commit), in jiffies?
- * [no locking]
- */
- unsigned long t_expires;
-
- /*
- * When this transaction started, in nanoseconds [no locking]
- */
- ktime_t t_start_time;
-
- /*
- * How many handles used this transaction? [t_handle_lock]
- */
- int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint: Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset for into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- * is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- * takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
- /* General journaling state flags [j_state_lock] */
- unsigned long j_flags;
-
- /*
- * Is there an outstanding uncleared error on the journal (from a prior
- * abort)? [j_state_lock]
- */
- int j_errno;
-
- /* The superblock buffer */
- struct buffer_head *j_sb_buffer;
- journal_superblock_t *j_superblock;
-
- /* Version of the superblock format */
- int j_format_version;
-
- /*
- * Protect the various scalars in the journal
- */
- spinlock_t j_state_lock;
-
- /*
- * Number of processes waiting to create a barrier lock [j_state_lock]
- */
- int j_barrier_count;
-
- /*
- * Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_running_transaction;
-
- /*
- * the transaction we are pushing to disk
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_committing_transaction;
-
- /*
- * ... and a linked circular list of all transactions waiting for
- * checkpointing. [j_list_lock]
- */
- transaction_t *j_checkpoint_transactions;
-
- /*
- * Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
- */
- wait_queue_head_t j_wait_transaction_locked;
-
- /* Wait queue for waiting for checkpointing to complete */
- wait_queue_head_t j_wait_logspace;
-
- /* Wait queue for waiting for commit to complete */
- wait_queue_head_t j_wait_done_commit;
-
- /* Wait queue to trigger checkpointing */
- wait_queue_head_t j_wait_checkpoint;
-
- /* Wait queue to trigger commit */
- wait_queue_head_t j_wait_commit;
-
- /* Wait queue to wait for updates to complete */
- wait_queue_head_t j_wait_updates;
-
- /* Semaphore for locking against concurrent checkpoints */
- struct mutex j_checkpoint_mutex;
-
- /*
- * Journal head: identifies the first unused block in the journal.
- * [j_state_lock]
- */
- unsigned int j_head;
-
- /*
- * Journal tail: identifies the oldest still-used block in the journal.
- * [j_state_lock]
- */
- unsigned int j_tail;
-
- /*
- * Journal free: how many free blocks are there in the journal?
- * [j_state_lock]
- */
- unsigned int j_free;
-
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
- */
- unsigned int j_first;
- unsigned int j_last;
-
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
- */
- struct block_device *j_dev;
- int j_blocksize;
- unsigned int j_blk_offset;
-
- /*
- * Device which holds the client fs. For internal journal this will be
- * equal to j_dev.
- */
- struct block_device *j_fs_dev;
-
- /* Total maximum capacity of the journal region on disk. */
- unsigned int j_maxlen;
-
- /*
- * Protects the buffer lists and internal buffer state.
- */
- spinlock_t j_list_lock;
-
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
- struct inode *j_inode;
-
- /*
- * Sequence number of the oldest transaction in the log [j_state_lock]
- */
- tid_t j_tail_sequence;
-
- /*
- * Sequence number of the next transaction to grant [j_state_lock]
- */
- tid_t j_transaction_sequence;
-
- /*
- * Sequence number of the most recently committed transaction
- * [j_state_lock].
- */
- tid_t j_commit_sequence;
-
- /*
- * Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
- */
- tid_t j_commit_request;
-
- /*
- * Sequence number of the most recent transaction someone is waiting
- * for to commit.
- * [j_state_lock]
- */
- tid_t j_commit_waited;
-
- /*
- * Journal uuid: identifies the object (filesystem, LVM volume etc)
- * backed by this journal. This will eventually be replaced by an array
- * of uuids, allowing us to index multiple devices within a single
- * journal and to perform atomic updates across them.
- */
- __u8 j_uuid[16];
-
- /* Pointer to the current commit thread for this journal */
- struct task_struct *j_task;
-
- /*
- * Maximum number of metadata buffers to allow in a single compound
- * commit transaction
- */
- int j_max_transaction_buffers;
-
- /*
- * What is the maximum transaction lifetime before we begin a commit?
- */
- unsigned long j_commit_interval;
-
- /* The timer used to wakeup the commit thread: */
- struct timer_list j_commit_timer;
-
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
- */
- spinlock_t j_revoke_lock;
- struct jbd_revoke_table_s *j_revoke;
- struct jbd_revoke_table_s *j_revoke_table[2];
-
- /*
- * array of bhs for journal_commit_transaction
- */
- struct buffer_head **j_wbuf;
- int j_wbufsize;
-
- /*
- * this is the pid of the last person to run a synchronous operation
- * through the journal.
- */
- pid_t j_last_sync_writer;
-
- /*
- * the average amount of time in nanoseconds it takes to commit a
- * transaction to the disk. [j_state_lock]
- */
- u64 j_average_commit_time;
-
- /*
- * An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
- */
- void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
-#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
-#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
-#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
-#define JFS_BARRIER 0x020 /* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t *transaction,
- struct journal_head *jh_in,
- struct journal_head **jh_out,
- unsigned int blocknr);
-
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase. j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe. We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
- return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int journal_restart (handle_t *, int nblocks);
-extern int journal_extend (handle_t *, int nblocks);
-extern int journal_get_write_access(handle_t *, struct buffer_head *);
-extern int journal_get_create_access (handle_t *, struct buffer_head *);
-extern int journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int journal_dirty_data (handle_t *, struct buffer_head *);
-extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void journal_release_buffer (handle_t *, struct buffer_head *);
-extern int journal_forget (handle_t *, struct buffer_head *);
-extern void journal_sync_buffer (struct buffer_head *);
-extern void journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int journal_stop(handle_t *);
-extern int journal_flush (journal_t *);
-extern void journal_lock_updates (journal_t *);
-extern void journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
- struct block_device *fs_dev,
- int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int journal_update_format (journal_t *);
-extern int journal_check_used_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_check_available_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_set_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_create (journal_t *);
-extern int journal_load (journal_t *journal);
-extern int journal_destroy (journal_t *);
-extern int journal_recover (journal_t *journal);
-extern int journal_wipe (journal_t *, int);
-extern int journal_skip_recovery (journal_t *);
-extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
- int);
-extern void journal_abort (journal_t *, int);
-extern int journal_errno (journal_t *);
-extern void journal_ack_err (journal_t *);
-extern int journal_clear_err (journal_t *);
-extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
- return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
- kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int journal_init_revoke(journal_t *, int);
-extern void journal_destroy_revoke_caches(void);
-extern int journal_init_revoke_caches(void);
-
-extern void journal_destroy_revoke(journal_t *);
-extern int journal_revoke (handle_t *,
- unsigned int, struct buffer_head *);
-extern int journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void journal_write_revoke_records(journal_t *,
- transaction_t *, int);
-
-/* Recovery revoke support */
-extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void journal_clear_revoke(journal_t *);
-extern void journal_switch_revoke_table(journal_t *journal);
-extern void journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void __journal_drop_transaction(journal_t *, transaction_t *);
-extern int cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_abort
- *
- * Simple test wrapper function to test the JFS_ABORT state flag. This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (eg. ext3), and that we and should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
- return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
- if (handle->h_aborted)
- return 1;
- return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
- handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__ */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference >= 0);
-}
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
- int nblocks = journal->j_max_transaction_buffers;
- if (journal->j_committing_transaction)
- nblocks += journal->j_committing_transaction->
- t_outstanding_credits;
- return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh) do {} while (0)
-#define print_buffer_fields(bh) do {} while (0)
-#define print_buffer_trace(bh) do {} while (0)
-#define BUFFER_TRACE(bh, info) do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
-#define JBUFFER_TRACE(jh, info) do {} while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_JBD_H */
diff --git a/kernel/include/linux/jbd2.h b/kernel/include/linux/jbd2.h
index eb1cebed3..eb5aabe4e 100644
--- a/kernel/include/linux/jbd2.h
+++ b/kernel/include/linux/jbd2.h
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/slab.h>
+#include <linux/bit_spinlock.h>
#include <crypto/hash.h>
#endif
@@ -277,6 +278,7 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
+/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */
#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
@@ -287,7 +289,7 @@ typedef struct journal_superblock_s
((j)->j_format_version >= 2 && \
((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
+#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
@@ -295,6 +297,8 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+/* See "journal feature predicate functions" below */
+
/* Features known to this kernel version: */
#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
#define JBD2_KNOWN_ROCOMPAT_FEATURES 0
@@ -336,7 +340,69 @@ BUFFER_FNS(Freed, freed)
BUFFER_FNS(Shadow, shadow)
BUFFER_FNS(Verified, verified)
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+ return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+ return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_State, &bh->b_state);
+#else
+ spin_lock(&bh->b_state_lock);
+#endif
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+ return spin_trylock(&bh->b_state_lock);
+#endif
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+ return spin_is_locked(&bh->b_state_lock);
+#endif
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_State, &bh->b_state);
+#else
+ spin_unlock(&bh->b_state_lock);
+#endif
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+ spin_lock(&bh->b_journal_head_lock);
+#endif
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+ spin_unlock(&bh->b_journal_head_lock);
+#endif
+}
#define J_ASSERT(assert) BUG_ON(!(assert))
@@ -995,6 +1061,69 @@ struct journal_s
__u32 j_csum_seed;
};
+/* journal feature predicate functions */
+#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \
+static inline bool jbd2_has_feature_##name(journal_t *j) \
+{ \
+ return ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_compat & \
+ cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \
+} \
+static inline void jbd2_set_feature_##name(journal_t *j) \
+{ \
+ (j)->j_superblock->s_feature_compat |= \
+ cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \
+} \
+static inline void jbd2_clear_feature_##name(journal_t *j) \
+{ \
+ (j)->j_superblock->s_feature_compat &= \
+ ~cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \
+}
+
+#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
+static inline bool jbd2_has_feature_##name(journal_t *j) \
+{ \
+ return ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_ro_compat & \
+ cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \
+} \
+static inline void jbd2_set_feature_##name(journal_t *j) \
+{ \
+ (j)->j_superblock->s_feature_ro_compat |= \
+ cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \
+} \
+static inline void jbd2_clear_feature_##name(journal_t *j) \
+{ \
+ (j)->j_superblock->s_feature_ro_compat &= \
+ ~cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \
+}
+
+#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \
+static inline bool jbd2_has_feature_##name(journal_t *j) \
+{ \
+ return ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_incompat & \
+ cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \
+} \
+static inline void jbd2_set_feature_##name(journal_t *j) \
+{ \
+ (j)->j_superblock->s_feature_incompat |= \
+ cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \
+} \
+static inline void jbd2_clear_feature_##name(journal_t *j) \
+{ \
+ (j)->j_superblock->s_feature_incompat &= \
+ ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \
+}
+
+JBD2_FEATURE_COMPAT_FUNCS(checksum, CHECKSUM)
+
+JBD2_FEATURE_INCOMPAT_FUNCS(revoke, REVOKE)
+JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT)
+JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT)
+JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
+JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
+
/*
* Journal flag definitions
*/
@@ -1007,6 +1136,7 @@ struct journal_s
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
+#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
/*
* Function declarations for the journaling transaction and buffer
@@ -1299,13 +1429,17 @@ static inline int tid_geq(tid_t x, tid_t y)
extern int jbd2_journal_blocks_per_page(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
+static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j)
+{
+ return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j);
+}
+
static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
{
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
- JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
- return 1;
+ WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) &&
+ journal->j_chksum_driver == NULL);
- return 0;
+ return journal->j_chksum_driver != NULL;
}
/*
@@ -1405,4 +1539,7 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
#endif /* __KERNEL__ */
+#define EFSBADCRC EBADMSG /* Bad CRC detected */
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
#endif /* _LINUX_JBD2_H */
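
With the generated predicates in place, call sites can drop the open-coded flag masks that the comment above marks for removal. A before/after sketch (setup_v3_checksums() is a hypothetical helper):

    /* old style, slated for removal */
    if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
    	setup_v3_checksums(j);

    /* new style, generated by JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) */
    if (jbd2_has_feature_csum3(j))
    	setup_v3_checksums(j);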
diff --git a/kernel/include/linux/jbd_common.h b/kernel/include/linux/jbd_common.h
deleted file mode 100644
index a90a6f5ca..000000000
--- a/kernel/include/linux/jbd_common.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
- return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
- return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(BH_State, &bh->b_state);
-#else
- spin_lock(&bh->b_state_lock);
-#endif
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- return bit_spin_trylock(BH_State, &bh->b_state);
-#else
- return spin_trylock(&bh->b_state_lock);
-#endif
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- return bit_spin_is_locked(BH_State, &bh->b_state);
-#else
- return spin_is_locked(&bh->b_state_lock);
-#endif
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_State, &bh->b_state);
-#else
- spin_unlock(&bh->b_state_lock);
-#endif
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(BH_JournalHead, &bh->b_state);
-#else
- spin_lock(&bh->b_journal_head_lock);
-#endif
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_JournalHead, &bh->b_state);
-#else
- spin_unlock(&bh->b_journal_head_lock);
-#endif
-}
-
-#endif
diff --git a/kernel/include/linux/jiffies.h b/kernel/include/linux/jiffies.h
index c367cbdf7..5fdc55312 100644
--- a/kernel/include/linux/jiffies.h
+++ b/kernel/include/linux/jiffies.h
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
+#include <generated/timeconst.h>
/*
* The following defines establish the engineering parameters of the PLL
@@ -288,11 +289,145 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
-extern unsigned long timespec_to_jiffies(const struct timespec *value);
-extern void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value);
+extern unsigned long __msecs_to_jiffies(const unsigned int m);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+/*
+ * HZ is equal to or smaller than 1000, and 1000 is a nice round
+ * multiple of HZ: divide by the factor between them, but round
+ * upwards:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+}
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+/*
+ * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
+ * simply multiply by the factor between them.
+ *
+ * But first make sure the multiplication result cannot overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return m * (HZ / MSEC_PER_SEC);
+}
+#else
+/*
+ * Generic case - multiply, round and divide. But first check that, if
+ * we are doing a net multiplication, we wouldn't overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+
+ return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
+}
+#endif
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it by a factor and
+ * handling any 32-bit overflows.
+ * for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * directly here and from __msecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+ if (__builtin_constant_p(m)) {
+ if ((int)m < 0)
+ return MAX_JIFFY_OFFSET;
+ return _msecs_to_jiffies(m);
+ } else {
+ return __msecs_to_jiffies(m);
+ }
+}
+
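As a worked example of the arithmetic above, assume HZ=100, so MSEC_PER_SEC / HZ == 10: msecs_to_jiffies(25) constant-folds through _msecs_to_jiffies() to (25 + 10 - 1) / 10 == 3 jiffies, i.e. 30ms, rounding up so a timeout is never shorter than requested. A hypothetical caller:

	/* Sketch only; schedule_timeout() is the usual consumer of jiffies. */
	static void example_sleep_quarter_second(void)
	{
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(250));	/* folded at compile time */
	}
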
+extern unsigned long __usecs_to_jiffies(const unsigned int u);
+#if !(USEC_PER_SEC % HZ)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+}
+#else
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+ >> USEC_TO_HZ_SHR32;
+}
+#endif
+
+/**
+ * usecs_to_jiffies: - convert microseconds to jiffies
+ * @u: time in microseconds
+ *
+ * conversion is done as follows:
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it by a factor and
+ * handling any 32-bit overflows as for msecs_to_jiffies.
+ *
+ * usecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __usecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * directly here and from __usecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
+{
+ if (__builtin_constant_p(u)) {
+ if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return _usecs_to_jiffies(u);
+ } else {
+ return __usecs_to_jiffies(u);
+ }
+}
+
+extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
+extern void jiffies_to_timespec64(const unsigned long jiffies,
+ struct timespec64 *value);
+static inline unsigned long timespec_to_jiffies(const struct timespec *value)
+{
+ struct timespec64 ts = timespec_to_timespec64(*value);
+
+ return timespec64_to_jiffies(&ts);
+}
+
+static inline void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value)
+{
+ struct timespec64 ts;
+
+ jiffies_to_timespec64(jiffies, &ts);
+ *value = timespec64_to_timespec(ts);
+}
+
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
diff --git a/kernel/include/linux/jump_label.h b/kernel/include/linux/jump_label.h
index f4de473f2..0536524bb 100644
--- a/kernel/include/linux/jump_label.h
+++ b/kernel/include/linux/jump_label.h
@@ -5,19 +5,52 @@
* Jump label support
*
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
+ *
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
*
* Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be used in both static_branch_unlikely()
+ * and static_branch_likely() statements.
+ *
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line true branch.
*
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'.
*
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * Since this relies on modifying code, the branch modifying functions
* must be considered absolute slow paths (machine wide synchronization etc.).
* OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +62,10 @@
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
- *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- * if (static_key_true(&my_key)) {
- * }
+ * Lacking toolchain and/or architecture support, static keys fall back to a
+ * simple conditional branch.
*
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
*/
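A minimal sketch of the replacement API documented above, with hypothetical names; until the key is flipped, the fast path compiles down to a NOP:

	DEFINE_STATIC_KEY_FALSE(example_feature_key);

	static void example_hot_path(void)
	{
		if (static_branch_unlikely(&example_feature_key))
			example_rare_work();	/* hypothetical out-of-line work */
	}

	static void example_setup(void)
	{
		static_branch_enable(&example_feature_key);	/* patches NOP -> JMP */
	}
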
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +109,8 @@ struct static_key {
#ifndef __ASSEMBLY__
enum jump_label_type {
- JUMP_LABEL_DISABLE = 0,
- JUMP_LABEL_ENABLE,
+ JUMP_LABEL_NOP = 0,
+ JUMP_LABEL_JMP,
};
struct module;
@@ -101,33 +124,18 @@ static inline int static_key_count(struct static_key *key)
#ifdef HAVE_JUMP_LABEL
-#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
-#define JUMP_LABEL_TYPE_MASK 1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
- return (struct jump_entry *)((unsigned long)key->entries
- & ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
- if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
- JUMP_LABEL_TYPE_TRUE_BRANCH)
- return true;
- return false;
-}
+#define JUMP_TYPE_FALSE 0UL
+#define JUMP_TYPE_TRUE 1UL
+#define JUMP_TYPE_MASK 1UL
static __always_inline bool static_key_false(struct static_key *key)
{
- return arch_static_branch(key);
+ return arch_static_branch(key, false);
}
static __always_inline bool static_key_true(struct static_key *key)
{
- return !static_key_false(key);
+ return !arch_static_branch(key, true);
}
extern struct jump_entry __start___jump_table[];
@@ -145,12 +153,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+#define STATIC_KEY_INIT_TRUE \
{ .enabled = ATOMIC_INIT(1), \
- .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE \
{ .enabled = ATOMIC_INIT(0), \
- .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+ .entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
@@ -198,21 +206,174 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
- { .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
- { .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
#endif /* HAVE_JUMP_LABEL */
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
-static inline bool static_key_enabled(struct static_key *key)
+static inline void static_key_enable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
{
- return static_key_count(key) > 0;
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec(key);
}
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+ struct static_key key;
+};
+
+struct static_key_false {
+ struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name) \
+ struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name) \
+ struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+extern bool ____wrong_branch_error(void);
+
+#define static_key_enabled(x) \
+({ \
+ if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \
+ !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
+ !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ ____wrong_branch_error(); \
+ static_key_count((struct static_key *)x) > 0; \
+})
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch| likely (1) | unlikely (0)
+ * -----------+-----------------------+------------------
+ * | |
+ * true (1) | ... | ...
+ * | NOP | JMP L
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ * | |
+ * false (0) | ... | ...
+ * | JMP L | NOP
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ * enabled type branch instruction
+ * -----------------------------+-----------
+ * 0 0 0 | NOP
+ * 0 0 1 | JMP
+ * 0 1 0 | NOP
+ * 0 1 1 | JMP
+ *
+ * 1 0 0 | JMP
+ * 1 0 1 | NOP
+ * 1 1 0 | JMP
+ * 1 1 1 | NOP
+ *
+ * Which gives the following functions:
+ *
+ * dynamic: instruction = enabled ^ branch
+ * static: instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
+
+#define static_branch_likely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = !arch_static_branch(&(x)->key, true); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = !arch_static_branch_jump(&(x)->key, true); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#define static_branch_unlikely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = arch_static_branch_jump(&(x)->key, false); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = arch_static_branch(&(x)->key, false); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage: refcounted; the branch is enabled while count != 0
+ */
+
+#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+
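Where several independent users may need the branch enabled, the refcounted form above keeps it true until the last user drops its reference; a sketch reusing the hypothetical example_feature_key from earlier:

	static void example_user_attach(void)
	{
		static_branch_inc(&example_feature_key);	/* 'make more true' */
	}

	static void example_user_detach(void)
	{
		static_branch_dec(&example_feature_key);	/* 'make more false' */
	}
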
#endif /* _LINUX_JUMP_LABEL_H */
#endif /* __ASSEMBLY__ */
diff --git a/kernel/include/linux/kasan.h b/kernel/include/linux/kasan.h
index 5486d777b..4b9f85c96 100644
--- a/kernel/include/linux/kasan.h
+++ b/kernel/include/linux/kasan.h
@@ -10,11 +10,19 @@ struct vm_struct;
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#include <asm/kasan.h>
+#include <asm/pgtable.h>
#include <linux/sched.h>
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end);
+
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/kernel/include/linux/kdev_t.h b/kernel/include/linux/kdev_t.h
index c838abe3e..052c7b32c 100644
--- a/kernel/include/linux/kdev_t.h
+++ b/kernel/include/linux/kdev_t.h
@@ -20,7 +20,7 @@
})
/* acceptable for old filesystems */
-static inline int old_valid_dev(dev_t dev)
+static inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
@@ -35,7 +35,7 @@ static inline dev_t old_decode_dev(u16 val)
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline int new_valid_dev(dev_t dev)
+static inline bool new_valid_dev(dev_t dev)
{
return 1;
}
@@ -54,11 +54,6 @@ static inline dev_t new_decode_dev(u32 dev)
return MKDEV(major, minor);
}
-static inline int huge_valid_dev(dev_t dev)
-{
- return 1;
-}
-
static inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
diff --git a/kernel/include/linux/kernel.h b/kernel/include/linux/kernel.h
index 8ecfe3c52..2370991ae 100644
--- a/kernel/include/linux/kernel.h
+++ b/kernel/include/linux/kernel.h
@@ -204,28 +204,28 @@ extern int _cond_resched(void);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
-/*
- * abs() handles unsigned and signed longs, ints, shorts and chars. For all
- * input types abs() returns a signed long.
- * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
- * for those.
+/**
+ * abs - return absolute value of an argument
+ * @x: the value. If it is an unsigned type, it is converted to a signed
+ * type first (s64, long or int depending on its size).
+ *
+ * Return: the absolute value of @x. If @x is 64-bit, the macro's return
+ * type is s64; otherwise it is signed long.
*/
-#define abs(x) ({ \
- long ret; \
- if (sizeof(x) == sizeof(long)) { \
- long __x = (x); \
- ret = (__x < 0) ? -__x : __x; \
- } else { \
- int __x = (x); \
- ret = (__x < 0) ? -__x : __x; \
- } \
- ret; \
- })
-
-#define abs64(x) ({ \
- s64 __x = (x); \
- (__x < 0) ? -__x : __x; \
- })
+#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
+ s64 __x = (x); \
+ (__x < 0) ? -__x : __x; \
+ }), ({ \
+ long ret; \
+ if (sizeof(x) == sizeof(long)) { \
+ long __x = (x); \
+ ret = (__x < 0) ? -__x : __x; \
+ } else { \
+ int __x = (x); \
+ ret = (__x < 0) ? -__x : __x; \
+ } \
+ ret; \
+ }))
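For illustration (hypothetical values): abs(-5) falls through to the int/long arm and evaluates to 5 as a signed long, while an s64 argument matches the sizeof(s64) arm selected by __builtin_choose_expr, which is why the separate abs64() can be removed:

	/* Hypothetical sketch of the type dispatch. */
	static void example_abs_usage(void)
	{
		long v = abs(-5);			/* int/long arm: signed long */
		s64 w = abs((s64)-4000000000LL);	/* s64 arm: replaces abs64() */

		(void)v;
		(void)w;
	}
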
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
@@ -415,7 +415,10 @@ extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+extern __printf(2, 0)
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+extern __printf(2, 0)
+const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
@@ -443,6 +446,9 @@ extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
/*
* Only to be used by arch init code. If the user over-wrote the default
* CONFIG_PANIC_TIMEOUT, honor it.
@@ -538,12 +544,6 @@ bool mac_pton(const char *s, u8 *mac);
*
* Most likely, you want to use tracing_on/tracing_off.
*/
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
enum ftrace_dump_mode {
DUMP_NONE,
@@ -687,10 +687,10 @@ do { \
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
-extern int
+extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
-extern int
+extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -710,7 +710,7 @@ int trace_printk(const char *fmt, ...)
{
return 0;
}
-static inline int
+static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
@@ -824,13 +824,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
-#define VERIFY_OCTAL_PERMISSIONS(perms) \
- (BUILD_BUG_ON_ZERO((perms) < 0) + \
- BUILD_BUG_ON_ZERO((perms) > 0777) + \
- /* User perms >= group perms >= other perms */ \
- BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
- BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
- /* Other writable? Generally considered a bad idea. */ \
- BUILD_BUG_ON_ZERO((perms) & 2) + \
+#define VERIFY_OCTAL_PERMISSIONS(perms) \
+ (BUILD_BUG_ON_ZERO((perms) < 0) + \
+ BUILD_BUG_ON_ZERO((perms) > 0777) + \
+ /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
+ BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
+ /* USER_WRITABLE >= GROUP_WRITABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
+ /* OTHER_WRITABLE? Generally considered a bad idea. */ \
+ BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
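A worked example of the per-bit checks above (hypothetical constant): 0644 passes every BUILD_BUG_ON_ZERO() and evaluates to itself, 0666 trips the other-writable check, and 0464 trips the user-vs-group write comparison because the group write bit exceeds the user write bit:

	/* Hypothetical compile-time use; an invalid mode breaks the build. */
	static const umode_t example_mode = VERIFY_OCTAL_PERMISSIONS(0644);
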
#endif
diff --git a/kernel/include/linux/kernfs.h b/kernel/include/linux/kernfs.h
index 29d1896c3..5d4e9c4b8 100644
--- a/kernel/include/linux/kernfs.h
+++ b/kernel/include/linux/kernfs.h
@@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
+size_t kernfs_path_len(struct kernfs_node *kn);
char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
@@ -278,6 +279,7 @@ void kernfs_put(struct kernfs_node *kn);
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
+struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
@@ -331,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }
+static inline size_t kernfs_path_len(struct kernfs_node *kn)
+{ return 0; }
+
static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen)
{ return NULL; }
@@ -355,6 +360,10 @@ static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
{ return NULL; }
+static inline struct inode *
+kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
+{ return NULL; }
+
static inline struct kernfs_root *
kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
void *priv)
diff --git a/kernel/include/linux/kexec.h b/kernel/include/linux/kexec.h
index e804306ef..d140b1e9f 100644
--- a/kernel/include/linux/kexec.h
+++ b/kernel/include/linux/kexec.h
@@ -16,7 +16,7 @@
#include <uapi/linux/kexec.h>
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -318,12 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-#else /* !CONFIG_KEXEC */
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
+int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
+int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+ unsigned long buf_len);
+int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
+ Elf_Shdr *sechdrs, unsigned int relsec);
+int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ unsigned int relsec);
+
+#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
-#endif /* CONFIG_KEXEC */
+#define kexec_in_progress false
+#endif /* CONFIG_KEXEC_CORE */
#endif /* !defined(__ASSEMBLY__) */
diff --git a/kernel/include/linux/key-type.h b/kernel/include/linux/key-type.h
index ff9f1d394..7463355a1 100644
--- a/kernel/include/linux/key-type.h
+++ b/kernel/include/linux/key-type.h
@@ -40,8 +40,7 @@ struct key_construction {
*/
struct key_preparsed_payload {
char *description; /* Proposed key description (or NULL) */
- void *type_data[2]; /* Private key-type data */
- void *payload[2]; /* Proposed payload */
+ union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
diff --git a/kernel/include/linux/key.h b/kernel/include/linux/key.h
index e1d4715f3..66f705243 100644
--- a/kernel/include/linux/key.h
+++ b/kernel/include/linux/key.h
@@ -89,6 +89,11 @@ struct keyring_index_key {
size_t desc_len;
};
+union key_payload {
+ void __rcu *rcu_data0;
+ void *data[4];
+};
+
/*****************************************************************************/
/*
* key reference with possession attribute handling
@@ -186,28 +191,18 @@ struct key {
};
};
- /* type specific data
- * - this is used by the keyring type to index the name
- */
- union {
- struct list_head link;
- unsigned long x[2];
- void *p[2];
- int reject_error;
- } type_data;
-
/* key data
* - this is used to hold the data actually used in cryptography or
* whatever
*/
union {
- union {
- unsigned long value;
- void __rcu *rcudata;
- void *data;
- void *data2[2];
- } payload;
- struct assoc_array keys;
+ union key_payload payload;
+ struct {
+ /* Keyring bits */
+ struct list_head name_link;
+ struct assoc_array keys;
+ };
+ int reject_error;
};
};
@@ -336,12 +331,12 @@ static inline bool key_is_instantiated(const struct key *key)
}
#define rcu_dereference_key(KEY) \
- (rcu_dereference_protected((KEY)->payload.rcudata, \
+ (rcu_dereference_protected((KEY)->payload.rcu_data0, \
rwsem_is_locked(&((struct key *)(KEY))->sem)))
#define rcu_assign_keypointer(KEY, PAYLOAD) \
do { \
- rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \
+ rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
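A sketch of the update side implied by the rwsem_is_locked() check above: the caller must hold key->sem for write, and the payload handling here is a hypothetical outline, not a real key type:

	static void example_key_replace(struct key *key, void *new_payload)
	{
		void *old = rcu_dereference_key(key);	/* legal: key->sem is held */

		rcu_assign_keypointer(key, new_payload);
		/* 'old' may only be freed after an RCU grace period,
		 * e.g. via call_rcu(); omitted in this sketch. */
		(void)old;
	}
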
#ifdef CONFIG_SYSCTL
diff --git a/kernel/include/linux/klist.h b/kernel/include/linux/klist.h
index 61e5b723a..953f283f8 100644
--- a/kernel/include/linux/klist.h
+++ b/kernel/include/linux/klist.h
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_prev(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
#endif
diff --git a/kernel/include/linux/kmemleak.h b/kernel/include/linux/kmemleak.h
index d0a1f99e2..4894c6888 100644
--- a/kernel/include/linux/kmemleak.h
+++ b/kernel/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
-extern void kmemleak_init(void) __ref;
+extern void kmemleak_init(void) __init;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/kernel/include/linux/kmod.h b/kernel/include/linux/kmod.h
index 0555cc66a..fcfd2bf14 100644
--- a/kernel/include/linux/kmod.h
+++ b/kernel/include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
UMH_DISABLED,
};
-extern void usermodehelper_init(void);
-
extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
diff --git a/kernel/include/linux/kobject.h b/kernel/include/linux/kobject.h
index 2d61b909f..e62845915 100644
--- a/kernel/include/linux/kobject.h
+++ b/kernel/include/linux/kobject.h
@@ -66,7 +66,7 @@ struct kobject {
struct kobject *parent;
struct kset *kset;
struct kobj_type *ktype;
- struct kernfs_node *sd;
+ struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
@@ -80,8 +80,9 @@ struct kobject {
extern __printf(2, 3)
int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+extern __printf(2, 0)
+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
diff --git a/kernel/include/linux/kprobes.h b/kernel/include/linux/kprobes.h
index 1ab54754a..8f6849084 100644
--- a/kernel/include/linux/kprobes.h
+++ b/kernel/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern bool within_kprobe_blacklist(unsigned long addr);
+
struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
diff --git a/kernel/include/linux/kref.h b/kernel/include/linux/kref.h
index 484604d18..e15828fd7 100644
--- a/kernel/include/linux/kref.h
+++ b/kernel/include/linux/kref.h
@@ -19,7 +19,6 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- * last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception. If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count. The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
- void (*release)(struct kref *kref),
- spinlock_t *lock)
-{
- unsigned long flags;
-
- WARN_ON(release == NULL);
- if (atomic_add_unless(&kref->refcount, -1, 1))
- return 0;
- spin_lock_irqsave(lock, flags);
- if (atomic_dec_and_test(&kref->refcount)) {
- release(kref);
- local_irq_restore(flags);
- return 1;
- }
- spin_unlock_irqrestore(lock, flags);
- return 0;
-}
-
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
diff --git a/kernel/include/linux/kthread.h b/kernel/include/linux/kthread.h
index 13d55206c..e691b6a23 100644
--- a/kernel/include/linux/kthread.h
+++ b/kernel/include/linux/kthread.h
@@ -11,7 +11,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
const char namefmt[], ...);
#define kthread_create(threadfn, data, namefmt, arg...) \
- kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
+ kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
})
void kthread_bind(struct task_struct *k, unsigned int cpu);
+void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
diff --git a/kernel/include/linux/kvm_host.h b/kernel/include/linux/kvm_host.h
index 84df94a3d..c690acc69 100644
--- a/kernel/include/linux/kvm_host.h
+++ b/kernel/include/linux/kvm_host.h
@@ -24,6 +24,8 @@
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
+#include <linux/irqbypass.h>
+#include <linux/swait.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -44,6 +46,10 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
+#ifndef KVM_ADDRESS_SPACE_NUM
+#define KVM_ADDRESS_SPACE_NUM 1
+#endif
+
/*
* For the normal pfn, the highest 12 bits should be zero,
* so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -134,6 +140,10 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_ENABLE_IBS 23
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
+#define KVM_REQ_SMI 26
+#define KVM_REQ_HV_CRASH 27
+#define KVM_REQ_IOAPIC_EOI_EXIT 28
+#define KVM_REQ_HV_RESET 29
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -225,16 +235,21 @@ struct kvm_vcpu {
unsigned long requests;
unsigned long guest_debug;
+ int pre_pcpu;
+ struct list_head blocked_vcpu_list;
+
struct mutex mutex;
struct kvm_run *run;
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
- struct swait_head wq;
+ unsigned char fpu_counter;
+ struct swait_queue_head wq;
struct pid *pid;
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
+ unsigned int halt_poll_ns;
#ifdef CONFIG_HAS_IOMEM
int mmio_needed;
@@ -321,6 +336,18 @@ struct kvm_kernel_irq_routing_entry {
struct hlist_node link;
};
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+struct kvm_irq_routing_table {
+ int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
+ u32 nr_rt_entries;
+ /*
+ * Array indexed by gsi. Each entry contains list of irq chips
+ * the gsi is connected to.
+ */
+ struct hlist_head map[0];
+};
+#endif
+
#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif
@@ -329,6 +356,13 @@ struct kvm_kernel_irq_routing_entry {
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
+#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif
+
/*
* Note:
* memslots are not sorted by id anymore, please use id_to_memslot()
@@ -347,12 +381,9 @@ struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots *memslots;
+ struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- u32 bsp_vcpu_id;
-#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
int last_boosted_vcpu;
@@ -411,8 +442,15 @@ struct kvm {
#define vcpu_unimpl(vcpu, fmt, ...) \
kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug(vcpu, fmt, ...) \
+ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
+ * the caller has read kvm->online_vcpus before (as is the case
+ * for kvm_for_each_vcpu, for example).
+ */
smp_rmb();
return kvm->vcpus[i];
}
@@ -423,6 +461,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (vcpu->vcpu_id == id)
+ return vcpu;
+ return NULL;
+}
+
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
@@ -436,10 +485,14 @@ void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_arch_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
+static inline void kvm_arch_irq_routing_update(struct kvm *kvm)
+{
+}
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
@@ -462,13 +515,25 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- return rcu_dereference_check(kvm->memslots,
+ return rcu_dereference_check(kvm->memslots[as_id],
srcu_read_lock_held(&kvm->srcu)
|| lockdep_is_held(&kvm->slots_lock));
}
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+ return __kvm_memslots(kvm, 0);
+}
+
+static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
+{
+ int as_id = kvm_arch_vcpu_memslots_id(vcpu);
+
+ return __kvm_memslots(vcpu->kvm, as_id);
+}
+
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
@@ -500,21 +565,22 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm);
+void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
@@ -524,8 +590,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
- int nr_pages);
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -538,13 +604,13 @@ void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+ bool *async, bool write_fault, bool *writable);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -573,7 +639,28 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+ int len);
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+ int offset, int len);
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+ unsigned long len);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -689,8 +776,26 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
-static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
return vcpu->arch.wqp;
@@ -734,10 +839,13 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
bool line_status);
-int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level, bool line_status);
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
+void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
@@ -762,16 +870,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
}
#endif
-static inline void kvm_guest_enter(void)
+/* must be called with irqs disabled */
+static inline void __kvm_guest_enter(void)
{
- unsigned long flags;
-
- BUG_ON(preemptible());
-
- local_irq_save(flags);
guest_enter();
- local_irq_restore(flags);
-
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
* is very similar to exiting to userspace from rcu point of view. In
@@ -783,12 +885,27 @@ static inline void kvm_guest_enter(void)
rcu_virt_note_context_switch(smp_processor_id());
}
+/* must be called with irqs disabled */
+static inline void __kvm_guest_exit(void)
+{
+ guest_exit();
+}
+
+static inline void kvm_guest_enter(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __kvm_guest_enter();
+ local_irq_restore(flags);
+}
+
static inline void kvm_guest_exit(void)
{
unsigned long flags;
local_irq_save(flags);
- guest_exit();
+ __kvm_guest_exit();
local_irq_restore(flags);
}
@@ -924,6 +1041,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#endif
int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
@@ -983,22 +1101,9 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-{
- return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
-}
-
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
-}
-
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-
#else
-
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-
#endif
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
@@ -1079,5 +1184,16 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
-#endif
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
+int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+
+#endif
diff --git a/kernel/include/linux/kvm_irqfd.h b/kernel/include/linux/kvm_irqfd.h
new file mode 100644
index 000000000..0c1de0509
--- /dev/null
+++ b/kernel/include/linux/kvm_irqfd.h
@@ -0,0 +1,71 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * irqfd: Allows an fd to be used to inject an interrupt to the guest
+ * Credit goes to Avi Kivity for the original idea.
+ */
+
+#ifndef __LINUX_KVM_IRQFD_H
+#define __LINUX_KVM_IRQFD_H
+
+#include <linux/kvm_host.h>
+#include <linux/poll.h>
+
+/*
+ * Resampling irqfds are a special variety of irqfds used to emulate
+ * level triggered interrupts. The interrupt is asserted on eventfd
+ * trigger. On acknowledgment through the irq ack notifier, the
+ * interrupt is de-asserted and userspace is notified through the
+ * resamplefd. All resamplers on the same gsi are de-asserted
+ * together, so we don't need to track the state of each individual
+ * user. We can also therefore share the same irq source ID.
+ */
+struct kvm_kernel_irqfd_resampler {
+ struct kvm *kvm;
+ /*
+ * List of resampling struct _irqfd objects sharing this gsi.
+ * RCU list modified under kvm->irqfds.resampler_lock
+ */
+ struct list_head list;
+ struct kvm_irq_ack_notifier notifier;
+ /*
+ * Entry in list of kvm->irqfd.resampler_list. Used for sharing
+ * resamplers among irqfds on the same gsi.
+ * Accessed and modified under kvm->irqfds.resampler_lock
+ */
+ struct list_head link;
+};
+
+struct kvm_kernel_irqfd {
+ /* Used for MSI fast-path */
+ struct kvm *kvm;
+ wait_queue_t wait;
+ /* Update side is protected by irqfds.lock */
+ struct kvm_kernel_irq_routing_entry irq_entry;
+ seqcount_t irq_entry_sc;
+ /* Used for level IRQ fast-path */
+ int gsi;
+ struct work_struct inject;
+ /* The resampler used by this irqfd (resampler-only) */
+ struct kvm_kernel_irqfd_resampler *resampler;
+ /* Eventfd notified on resample (resampler-only) */
+ struct eventfd_ctx *resamplefd;
+ /* Entry in list of irqfds for a resampler (resampler-only) */
+ struct list_head resampler_link;
+ /* Used for setup/shutdown */
+ struct eventfd_ctx *eventfd;
+ struct list_head list;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct irq_bypass_consumer consumer;
+ struct irq_bypass_producer *producer;
+};
+
+#endif /* __LINUX_KVM_IRQFD_H */
diff --git a/kernel/include/linux/kvm_types.h b/kernel/include/linux/kvm_types.h
index 931da7e91..1b47a185c 100644
--- a/kernel/include/linux/kvm_types.h
+++ b/kernel/include/linux/kvm_types.h
@@ -28,6 +28,7 @@ struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
+struct kvm_memslots;
enum kvm_mr_change;
diff --git a/kernel/include/linux/leds.h b/kernel/include/linux/leds.h
index 9a2b00009..fa359c79c 100644
--- a/kernel/include/linux/leds.h
+++ b/kernel/include/linux/leds.h
@@ -12,6 +12,7 @@
#ifndef __LINUX_LEDS_H_INCLUDED
#define __LINUX_LEDS_H_INCLUDED
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
@@ -222,6 +223,11 @@ struct led_trigger {
struct list_head next_trig;
};
+ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
/* Registration functions for complex triggers */
extern int led_trigger_register(struct led_trigger *trigger);
extern void led_trigger_unregister(struct led_trigger *trigger);
@@ -238,6 +244,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_on,
unsigned long *delay_off,
int invert);
+extern void led_trigger_set_default(struct led_classdev *led_cdev);
+extern void led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
+extern void led_trigger_remove(struct led_classdev *led_cdev);
+
+static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+{
+ return led_cdev->trigger_data;
+}
+
/**
* led_trigger_rename_static - rename a trigger
* @name: the new trigger name
@@ -267,6 +283,22 @@ static inline void led_trigger_register_simple(const char *name,
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_trigger_blink(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off) {}
+static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert) {}
+static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
+static inline void led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger) {}
+static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_LEDS_TRIGGERS */
/* Trigger specific functions */
diff --git a/kernel/include/linux/lglock.h b/kernel/include/linux/lglock.h
index 9603a1500..6f035f635 100644
--- a/kernel/include/linux/lglock.h
+++ b/kernel/include/linux/lglock.h
@@ -34,10 +34,10 @@
#endif
struct lglock {
-#ifndef CONFIG_PREEMPT_RT_FULL
- arch_spinlock_t __percpu *lock;
-#else
+#ifdef CONFIG_PREEMPT_RT_FULL
struct rt_mutex __percpu *lock;
+#else
+ arch_spinlock_t __percpu *lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lock_key;
@@ -45,34 +45,40 @@ struct lglock {
#endif
};
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_FULL
# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
struct lglock name = { .lock = &name ## _lock }
# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
static struct lglock name = { .lock = &name ## _lock }
+
#else
-# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
- = __RT_MUTEX_INITIALIZER( name ## _lock); \
+#define DEFINE_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
struct lglock name = { .lock = &name ## _lock }
-# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
- = __RT_MUTEX_INITIALIZER( name ## _lock); \
+#define DEFINE_STATIC_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
static struct lglock name = { .lock = &name ## _lock }
#endif
void lg_lock_init(struct lglock *lg, char *name);
+
void lg_local_lock(struct lglock *lg);
void lg_local_unlock(struct lglock *lg);
void lg_local_lock_cpu(struct lglock *lg, int cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
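A minimal sketch of the lglock API declared above (lock name hypothetical): per-CPU paths take only the local CPU's lock, a cross-CPU operation takes them all, and under CONFIG_PREEMPT_RT_FULL the #ifdef swap above transparently backs each per-CPU lock with an rt_mutex:

	DEFINE_STATIC_LGLOCK(example_lglock);

	static void example_local_path(void)
	{
		lg_local_lock(&example_lglock);
		/* touch this CPU's share of the data */
		lg_local_unlock(&example_lglock);
	}

	static void example_global_path(void)
	{
		lg_global_lock(&example_lglock);
		/* touch every CPU's data */
		lg_global_unlock(&example_lglock);
	}
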
diff --git a/kernel/include/linux/libata.h b/kernel/include/linux/libata.h
index e0e33787c..b20a2752f 100644
--- a/kernel/include/linux/libata.h
+++ b/kernel/include/linux/libata.h
@@ -134,7 +134,6 @@ enum {
ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
ATA_SHT_EMULATED = 1,
- ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
@@ -211,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
@@ -255,6 +255,7 @@ enum {
ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
+ ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
@@ -717,7 +718,7 @@ struct ata_device {
union {
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
- };
+ } ____cacheline_aligned;
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
@@ -1367,7 +1368,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
.emulated = ATA_SHT_EMULATED, \
.use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
diff --git a/kernel/include/linux/libfdt_env.h b/kernel/include/linux/libfdt_env.h
index 01508c7b8..2a663c6bb 100644
--- a/kernel/include/linux/libfdt_env.h
+++ b/kernel/include/linux/libfdt_env.h
@@ -5,6 +5,10 @@
#include <asm/byteorder.h>
+typedef __be16 fdt16_t;
+typedef __be32 fdt32_t;
+typedef __be64 fdt64_t;
+
#define fdt32_to_cpu(x) be32_to_cpu(x)
#define cpu_to_fdt32(x) cpu_to_be32(x)
#define fdt64_to_cpu(x) be64_to_cpu(x)
diff --git a/kernel/include/linux/libnvdimm.h b/kernel/include/linux/libnvdimm.h
new file mode 100644
index 000000000..3f021dc5d
--- /dev/null
+++ b/kernel/include/linux/libnvdimm.h
@@ -0,0 +1,155 @@
+/*
+ * libnvdimm - Non-volatile-memory Devices Subsystem
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __LIBNVDIMM_H__
+#define __LIBNVDIMM_H__
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+enum {
+ /* when a dimm supports both PMEM and BLK access a label is required */
+ NDD_ALIASING = 1 << 0,
+ /* unarmed memory devices may not persist writes */
+ NDD_UNARMED = 1 << 1,
+
+ /* need to set a limit somewhere, but yes, this is likely overkill */
+ ND_IOCTL_MAX_BUFLEN = SZ_4M,
+ ND_CMD_MAX_ELEM = 4,
+ ND_CMD_MAX_ENVELOPE = 16,
+ ND_CMD_ARS_STATUS_MAX = SZ_4K,
+ ND_MAX_MAPPINGS = 32,
+
+ /* region flag indicating to direct-map persistent memory by default */
+ ND_REGION_PAGEMAP = 0,
+
+ /* mark newly adjusted resources as requiring a label update */
+ DPA_RESOURCE_ADJUSTED = 1 << 0,
+};
+
+extern struct attribute_group nvdimm_bus_attribute_group;
+extern struct attribute_group nvdimm_attribute_group;
+extern struct attribute_group nd_device_attribute_group;
+extern struct attribute_group nd_numa_attribute_group;
+extern struct attribute_group nd_region_attribute_group;
+extern struct attribute_group nd_mapping_attribute_group;
+
+struct nvdimm;
+struct nvdimm_bus_descriptor;
+typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len);
+
+struct nd_namespace_label;
+struct nvdimm_drvdata;
+struct nd_mapping {
+ struct nvdimm *nvdimm;
+ struct nd_namespace_label **labels;
+ u64 start;
+ u64 size;
+ /*
+	 * @ndd is for private use at region enable / disable time via
+	 * get_ndd() + put_ndd(); all other nd_mapping-to-ndd
+	 * conversions use to_ndd(), which respects the enabled state
+	 * of the nvdimm.
+ */
+ struct nvdimm_drvdata *ndd;
+};
+
+struct nvdimm_bus_descriptor {
+ const struct attribute_group **attr_groups;
+ unsigned long dsm_mask;
+ char *provider_name;
+ ndctl_fn ndctl;
+};
+
+struct nd_cmd_desc {
+ int in_num;
+ int out_num;
+ u32 in_sizes[ND_CMD_MAX_ELEM];
+ int out_sizes[ND_CMD_MAX_ELEM];
+};
+
+struct nd_interleave_set {
+ u64 cookie;
+};
+
+struct nd_region_desc {
+ struct resource *res;
+ struct nd_mapping *nd_mapping;
+ u16 num_mappings;
+ const struct attribute_group **attr_groups;
+ struct nd_interleave_set *nd_set;
+ void *provider_data;
+ int num_lanes;
+ int numa_node;
+ unsigned long flags;
+};
+
+struct nvdimm_bus;
+struct module;
+struct device;
+struct nd_blk_region;
+struct nd_blk_region_desc {
+ int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
+ void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
+ int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
+ void *iobuf, u64 len, int rw);
+ struct nd_region_desc ndr_desc;
+};
+
+static inline struct nd_blk_region_desc *to_blk_region_desc(
+ struct nd_region_desc *ndr_desc)
+{
+ return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
+}
+
+struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
+ struct nvdimm_bus_descriptor *nfit_desc, struct module *module);
+#define nvdimm_bus_register(parent, desc) \
+ __nvdimm_bus_register(parent, desc, THIS_MODULE)
+void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
+struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
+struct nvdimm *to_nvdimm(struct device *dev);
+struct nd_region *to_nd_region(struct device *dev);
+struct nd_blk_region *to_nd_blk_region(struct device *dev);
+struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
+const char *nvdimm_name(struct nvdimm *nvdimm);
+void *nvdimm_provider_data(struct nvdimm *nvdimm);
+struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
+ const struct attribute_group **groups, unsigned long flags,
+ unsigned long *dsm_mask);
+const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
+const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
+u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
+ const struct nd_cmd_desc *desc, int idx, void *buf);
+u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
+ const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
+ const u32 *out_field);
+int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
+struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
+ struct nd_region_desc *ndr_desc);
+struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
+ struct nd_region_desc *ndr_desc);
+struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
+ struct nd_region_desc *ndr_desc);
+void *nd_region_provider_data(struct nd_region *nd_region);
+void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
+void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
+struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
+unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
+void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
+u64 nd_fletcher64(void *addr, size_t len, bool le);
+#endif /* __LIBNVDIMM_H__ */
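For orientation, a minimal sketch of how a bus provider might use this API (the provider name and handler are hypothetical; error handling is elided): fill in an nvdimm_bus_descriptor and register it, letting the nvdimm_bus_register() wrapper supply THIS_MODULE.

/* Hypothetical provider glue for the API declared above. */
static int example_ndctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	return -ENOTTY;		/* no DSM commands in this sketch */
}

static struct nvdimm_bus_descriptor example_nd_desc = {
	.provider_name	= "example",
	.ndctl		= example_ndctl,
};

/* In the provider's probe path (dev is the parent struct device *):
 *	struct nvdimm_bus *bus = nvdimm_bus_register(dev, &example_nd_desc);
 *	if (!bus)
 *		return -ENOMEM;
 */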
diff --git a/kernel/include/linux/lightnvm.h b/kernel/include/linux/lightnvm.h
new file mode 100644
index 000000000..034117b3b
--- /dev/null
+++ b/kernel/include/linux/lightnvm.h
@@ -0,0 +1,441 @@
+#ifndef NVM_H
+#define NVM_H
+
+enum {
+ NVM_IO_OK = 0,
+ NVM_IO_REQUEUE = 1,
+ NVM_IO_DONE = 2,
+ NVM_IO_ERR = 3,
+
+ NVM_IOTYPE_NONE = 0,
+ NVM_IOTYPE_GC = 1,
+};
+
+#ifdef CONFIG_NVM
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/dmapool.h>
+
+enum {
+ /* HW Responsibilities */
+ NVM_RSP_L2P = 1 << 0,
+ NVM_RSP_ECC = 1 << 1,
+
+	/* Physical Addressing Mode */
+ NVM_ADDRMODE_LINEAR = 0,
+ NVM_ADDRMODE_CHANNEL = 1,
+
+ /* Plane programming mode for LUN */
+ NVM_PLANE_SINGLE = 0,
+ NVM_PLANE_DOUBLE = 1,
+ NVM_PLANE_QUAD = 2,
+
+ /* Status codes */
+ NVM_RSP_SUCCESS = 0x0,
+ NVM_RSP_NOT_CHANGEABLE = 0x1,
+ NVM_RSP_ERR_FAILWRITE = 0x40ff,
+ NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
+
+ /* Device opcodes */
+ NVM_OP_HBREAD = 0x02,
+ NVM_OP_HBWRITE = 0x81,
+ NVM_OP_PWRITE = 0x91,
+ NVM_OP_PREAD = 0x92,
+ NVM_OP_ERASE = 0x90,
+
+ /* PPA Command Flags */
+ NVM_IO_SNGL_ACCESS = 0x0,
+ NVM_IO_DUAL_ACCESS = 0x1,
+ NVM_IO_QUAD_ACCESS = 0x2,
+
+ /* NAND Access Modes */
+ NVM_IO_SUSPEND = 0x80,
+ NVM_IO_SLC_MODE = 0x100,
+ NVM_IO_SCRAMBLE_DISABLE = 0x200,
+
+ /* Block Types */
+ NVM_BLK_T_FREE = 0x0,
+ NVM_BLK_T_BAD = 0x1,
+ NVM_BLK_T_DEV = 0x2,
+ NVM_BLK_T_HOST = 0x4,
+};
+
+struct nvm_id_group {
+ u8 mtype;
+ u8 fmtype;
+ u8 num_ch;
+ u8 num_lun;
+ u8 num_pln;
+ u16 num_blk;
+ u16 num_pg;
+ u16 fpg_sz;
+ u16 csecs;
+ u16 sos;
+ u32 trdt;
+ u32 trdm;
+ u32 tprt;
+ u32 tprm;
+ u32 tbet;
+ u32 tbem;
+ u32 mpos;
+ u32 mccap;
+ u16 cpar;
+};
+
+struct nvm_addr_format {
+ u8 ch_offset;
+ u8 ch_len;
+ u8 lun_offset;
+ u8 lun_len;
+ u8 pln_offset;
+ u8 pln_len;
+ u8 blk_offset;
+ u8 blk_len;
+ u8 pg_offset;
+ u8 pg_len;
+ u8 sect_offset;
+ u8 sect_len;
+};
+
+struct nvm_id {
+ u8 ver_id;
+ u8 vmnt;
+ u8 cgrps;
+ u32 cap;
+ u32 dom;
+ struct nvm_addr_format ppaf;
+ struct nvm_id_group groups[4];
+} __packed;
+
+struct nvm_target {
+ struct list_head list;
+ struct nvm_tgt_type *type;
+ struct gendisk *disk;
+};
+
+struct nvm_tgt_instance {
+ struct nvm_tgt_type *tt;
+};
+
+#define ADDR_EMPTY (~0ULL)
+
+#define NVM_VERSION_MAJOR 1
+#define NVM_VERSION_MINOR 0
+#define NVM_VERSION_PATCH 0
+
+#define NVM_BLK_BITS (16)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
+#define NVM_CH_BITS (8)
+
+struct ppa_addr {
+ /* Generic structure for all addresses */
+ union {
+ struct {
+ u64 blk : NVM_BLK_BITS;
+ u64 pg : NVM_PG_BITS;
+ u64 sec : NVM_SEC_BITS;
+ u64 pl : NVM_PL_BITS;
+ u64 lun : NVM_LUN_BITS;
+ u64 ch : NVM_CH_BITS;
+ } g;
+
+ u64 ppa;
+ };
+};
+
+struct nvm_rq {
+ struct nvm_tgt_instance *ins;
+ struct nvm_dev *dev;
+
+ struct bio *bio;
+
+ union {
+ struct ppa_addr ppa_addr;
+ dma_addr_t dma_ppa_list;
+ };
+
+ struct ppa_addr *ppa_list;
+
+ void *metadata;
+ dma_addr_t dma_metadata;
+
+ uint8_t opcode;
+ uint16_t nr_pages;
+ uint16_t flags;
+};
+
+static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
+{
+ return pdu - sizeof(struct nvm_rq);
+}
+
+static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
+{
+ return rqdata + 1;
+}
+
+struct nvm_block;
+
+typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
+ nvm_l2p_update_fn *, void *);
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
+ nvm_bb_update_fn *, void *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+typedef void (nvm_destroy_dma_pool_fn)(void *);
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
+ dma_addr_t *);
+typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);
+
+struct nvm_dev_ops {
+ nvm_id_fn *identity;
+ nvm_get_l2p_tbl_fn *get_l2p_tbl;
+ nvm_op_bb_tbl_fn *get_bb_tbl;
+ nvm_op_set_bb_fn *set_bb_tbl;
+
+ nvm_submit_io_fn *submit_io;
+ nvm_erase_blk_fn *erase_block;
+
+ nvm_create_dma_pool_fn *create_dma_pool;
+ nvm_destroy_dma_pool_fn *destroy_dma_pool;
+ nvm_dev_dma_alloc_fn *dev_dma_alloc;
+ nvm_dev_dma_free_fn *dev_dma_free;
+
+ unsigned int max_phys_sect;
+};
+
+struct nvm_lun {
+ int id;
+
+ int lun_id;
+ int chnl_id;
+
+ unsigned int nr_inuse_blocks; /* Number of used blocks */
+ unsigned int nr_free_blocks; /* Number of unused blocks */
+ unsigned int nr_bad_blocks; /* Number of bad blocks */
+ struct nvm_block *blocks;
+
+ spinlock_t lock;
+};
+
+struct nvm_block {
+ struct list_head list;
+ struct nvm_lun *lun;
+ unsigned long id;
+
+ void *priv;
+ int type;
+};
+
+struct nvm_dev {
+ struct nvm_dev_ops *ops;
+
+ struct list_head devices;
+ struct list_head online_targets;
+
+ /* Media manager */
+ struct nvmm_type *mt;
+ void *mp;
+
+ /* Device information */
+ int nr_chnls;
+ int nr_planes;
+ int luns_per_chnl;
+ int sec_per_pg; /* only sectors for a single page */
+ int pgs_per_blk;
+ int blks_per_lun;
+ int sec_size;
+ int oob_size;
+ struct nvm_addr_format ppaf;
+
+ /* Calculated/Cached values. These do not reflect the actual usable
+ * blocks at run-time.
+ */
+ int max_rq_size;
+ int plane_mode; /* drive device in single, double or quad mode */
+
+ int sec_per_pl; /* all sectors across planes */
+ int sec_per_blk;
+ int sec_per_lun;
+
+ unsigned long total_pages;
+ unsigned long total_blocks;
+ int nr_luns;
+	unsigned int max_pages_per_blk;
+
+ void *ppalist_pool;
+
+ struct nvm_id identity;
+
+ /* Backend device */
+ struct request_queue *q;
+ char name[DISK_NAME_LEN];
+};
+
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
+{
+ struct ppa_addr l;
+
+ l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+ l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+ l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+ l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+ l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+ l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
+
+ return l;
+}
+
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
+{
+ struct ppa_addr l;
+
+	/*
+	 * (r.ppa >> X_offset) & X_len bitmask, for X in {blk, pg, sec,
+	 * pl, lun, ch}.
+	 */
+	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+					(((1 << dev->ppaf.blk_len) - 1));
+	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
+					(((1 << dev->ppaf.pg_len) - 1));
+	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
+					(((1 << dev->ppaf.sect_len) - 1));
+	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
+					(((1 << dev->ppaf.pln_len) - 1));
+	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
+					(((1 << dev->ppaf.lun_len) - 1));
+	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
+					(((1 << dev->ppaf.ch_len) - 1));
+
+ return l;
+}
+
+static inline int ppa_empty(struct ppa_addr ppa_addr)
+{
+ return (ppa_addr.ppa == ADDR_EMPTY);
+}
+
+static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
+{
+ ppa_addr->ppa = ADDR_EMPTY;
+}
+
+static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
+ struct nvm_block *blk)
+{
+ struct ppa_addr ppa;
+ struct nvm_lun *lun = blk->lun;
+
+ ppa.ppa = 0;
+ ppa.g.blk = blk->id % dev->blks_per_lun;
+ ppa.g.lun = lun->lun_id;
+ ppa.g.ch = lun->chnl_id;
+
+ return ppa;
+}
+
+typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
+typedef sector_t (nvm_tgt_capacity_fn)(void *);
+typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
+typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
+typedef void (nvm_tgt_exit_fn)(void *);
+
+struct nvm_tgt_type {
+ const char *name;
+ unsigned int version[3];
+
+ /* target entry points */
+ nvm_tgt_make_rq_fn *make_rq;
+ nvm_tgt_capacity_fn *capacity;
+ nvm_tgt_end_io_fn *end_io;
+
+ /* module-specific init/teardown */
+ nvm_tgt_init_fn *init;
+ nvm_tgt_exit_fn *exit;
+
+ /* For internal use */
+ struct list_head list;
+};
+
+extern int nvm_register_target(struct nvm_tgt_type *);
+extern void nvm_unregister_target(struct nvm_tgt_type *);
+
+extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
+extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
+
+typedef int (nvmm_register_fn)(struct nvm_dev *);
+typedef void (nvmm_unregister_fn)(struct nvm_dev *);
+typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
+ struct nvm_lun *, unsigned long);
+typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
+typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
+ unsigned long);
+typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
+
+struct nvmm_type {
+ const char *name;
+ unsigned int version[3];
+
+ nvmm_register_fn *register_mgr;
+ nvmm_unregister_fn *unregister_mgr;
+
+ /* Block administration callbacks */
+ nvmm_get_blk_fn *get_blk;
+ nvmm_put_blk_fn *put_blk;
+ nvmm_open_blk_fn *open_blk;
+ nvmm_close_blk_fn *close_blk;
+ nvmm_flush_blk_fn *flush_blk;
+
+ nvmm_submit_io_fn *submit_io;
+ nvmm_end_io_fn *end_io;
+ nvmm_erase_blk_fn *erase_blk;
+
+ /* Configuration management */
+ nvmm_get_lun_fn *get_lun;
+
+ /* Statistics */
+ nvmm_lun_info_print_fn *lun_info_print;
+ struct list_head list;
+};
+
+extern int nvm_register_mgr(struct nvmm_type *);
+extern void nvm_unregister_mgr(struct nvmm_type *);
+
+extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
+ unsigned long);
+extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
+
+extern int nvm_register(struct request_queue *, char *,
+ struct nvm_dev_ops *);
+extern void nvm_unregister(char *);
+
+extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+#else /* CONFIG_NVM */
+struct nvm_dev_ops;
+
+static inline int nvm_register(struct request_queue *q, char *disk_name,
+ struct nvm_dev_ops *ops)
+{
+ return -EINVAL;
+}
+static inline void nvm_unregister(char *disk_name) {}
+#endif /* CONFIG_NVM */
+#endif /* NVM_H */
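A short sketch of the two address helpers above, with purely illustrative field values: generic_to_dev_addr() packs the generic (ch, lun, pl, blk, pg, sec) tuple into the device's own bit layout described by dev->ppaf, and dev_to_generic_addr() unpacks it again.

/* Round-trip a physical page address through the device format. */
static void example_ppa_roundtrip(struct nvm_dev *dev)
{
	struct ppa_addr generic, dev_fmt;

	generic.ppa = 0;		/* start from a clean address */
	generic.g.ch = 1;
	generic.g.lun = 2;
	generic.g.blk = 100;

	dev_fmt = generic_to_dev_addr(dev, generic);	/* pack */
	generic = dev_to_generic_addr(dev, dev_fmt);	/* unpack */
}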
diff --git a/kernel/include/linux/list.h b/kernel/include/linux/list.h
index feb773c76..993395a2e 100644
--- a/kernel/include/linux/list.h
+++ b/kernel/include/linux/list.h
@@ -87,7 +87,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
- prev->next = next;
+ WRITE_ONCE(prev->next, next);
}
/**
@@ -615,7 +615,8 @@ static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
- *pprev = next;
+
+ WRITE_ONCE(*pprev, next);
if (next)
next->pprev = pprev;
}
@@ -672,6 +673,11 @@ static inline void hlist_add_fake(struct hlist_node *n)
n->pprev = &n->next;
}
+static inline bool hlist_fake(struct hlist_node *h)
+{
+ return h->pprev == &h->next;
+}
+
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
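The WRITE_ONCE() conversions in __list_del() and __hlist_del() are for the benefit of lockless readers: a reader that loads ->next with READ_ONCE() must pair with a single, untearable store on the writer side. A minimal sketch of the reader half (helper name hypothetical):

/* Pairs with the WRITE_ONCE() in __list_del(): load the pointer
 * exactly once so the compiler can neither tear nor re-load it.
 */
static inline struct list_head *example_peek_first(struct list_head *head)
{
	return READ_ONCE(head->next);
}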
diff --git a/kernel/include/linux/list_bl.h b/kernel/include/linux/list_bl.h
index d8876a0cf..89ffaa7bd 100644
--- a/kernel/include/linux/list_bl.h
+++ b/kernel/include/linux/list_bl.h
@@ -42,13 +42,15 @@ struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};
-static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
-{
- h->first = NULL;
#ifdef CONFIG_PREEMPT_RT_BASE
- raw_spin_lock_init(&h->lock);
+#define INIT_HLIST_BL_HEAD(h) \
+do { \
+ (h)->first = NULL; \
+ raw_spin_lock_init(&(h)->lock); \
+} while (0)
+#else
+#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
#endif
-}
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
@@ -103,9 +105,10 @@ static inline void __hlist_bl_del(struct hlist_bl_node *n)
LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
/* pprev may be `first`, so be careful not to lose the lock bit */
- *pprev = (struct hlist_bl_node *)
+ WRITE_ONCE(*pprev,
+ (struct hlist_bl_node *)
((unsigned long)next |
- ((unsigned long)*pprev & LIST_BL_LOCKMASK));
+ ((unsigned long)*pprev & LIST_BL_LOCKMASK)));
if (next)
next->pprev = pprev;
}
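The masking in __hlist_bl_del() exists because bit 0 of the head's ->first pointer doubles as the bucket lock, so the WRITE_ONCE() must carry the lock bit across the store unchanged. Illustrative usage (helper name hypothetical):

/* Delete under the bit lock: hlist_bl_lock() sets bit 0 of
 * head->first, and __hlist_bl_del() keeps it intact via
 * LIST_BL_LOCKMASK.
 */
static void example_bl_del(struct hlist_bl_head *head,
			   struct hlist_bl_node *n)
{
	hlist_bl_lock(head);
	hlist_bl_del(n);
	hlist_bl_unlock(head);
}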
diff --git a/kernel/include/linux/list_nulls.h b/kernel/include/linux/list_nulls.h
index f266661d2..444d2b131 100644
--- a/kernel/include/linux/list_nulls.h
+++ b/kernel/include/linux/list_nulls.h
@@ -76,7 +76,8 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
{
struct hlist_nulls_node *next = n->next;
struct hlist_nulls_node **pprev = n->pprev;
- *pprev = next;
+
+ WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
next->pprev = pprev;
}
diff --git a/kernel/include/linux/livepatch.h b/kernel/include/linux/livepatch.h
index ee6dbb39a..31db7a05d 100644
--- a/kernel/include/linux/livepatch.h
+++ b/kernel/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
struct klp_func *funcs;
/* internal */
- struct kobject *kobj;
+ struct kobject kobj;
struct module *mod;
enum klp_state state;
};
@@ -123,6 +123,12 @@ struct klp_patch {
enum klp_state state;
};
+#define klp_for_each_object(patch, obj) \
+ for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+ for (func = obj->funcs; func->old_name; func++)
+
int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
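A sketch of the new iterators: walking every patched function in every object of a patch, using the patch/object/func layout declared above (the dump helper is hypothetical):

static void example_dump(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			pr_info("%s: %s\n",
				obj->name ? obj->name : "vmlinux",
				func->old_name);
}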
diff --git a/kernel/include/linux/llist.h b/kernel/include/linux/llist.h
index fbf10a0bc..fd4ca0b4f 100644
--- a/kernel/include/linux/llist.h
+++ b/kernel/include/linux/llist.h
@@ -55,8 +55,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
-#include <asm/cmpxchg.h>
struct llist_head {
struct llist_node *first;
diff --git a/kernel/include/linux/locallock.h b/kernel/include/linux/locallock.h
index 339ba00ad..6fe5928fc 100644
--- a/kernel/include/linux/locallock.h
+++ b/kernel/include/linux/locallock.h
@@ -43,9 +43,9 @@ struct local_irq_lock {
* for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
*/
#ifdef CONFIG_PREEMPT_RT_FULL
-# define spin_lock_local(lock) rt_spin_lock(lock)
-# define spin_trylock_local(lock) rt_spin_trylock(lock)
-# define spin_unlock_local(lock) rt_spin_unlock(lock)
+# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
+# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
+# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
#else
# define spin_lock_local(lock) spin_lock(lock)
# define spin_trylock_local(lock) spin_trylock(lock)
diff --git a/kernel/include/linux/lockd/lockd.h b/kernel/include/linux/lockd/lockd.h
index ff82a3287..c15373894 100644
--- a/kernel/include/linux/lockd/lockd.h
+++ b/kernel/include/linux/lockd/lockd.h
@@ -68,6 +68,7 @@ struct nlm_host {
struct nsm_handle *h_nsmhandle; /* NSM status handle */
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
+ char nodename[UNX_MAXNODENAME + 1];
};
/*
@@ -235,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *);
struct nlm_host * nlm_get_host(struct nlm_host *);
void nlm_shutdown_hosts(void);
void nlm_shutdown_hosts_net(struct net *net);
-void nlm_host_rebooted(const struct nlm_reboot *);
+void nlm_host_rebooted(const struct net *net,
+ const struct nlm_reboot *);
/*
* Host monitoring
@@ -243,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *);
int nsm_monitor(const struct nlm_host *host);
void nsm_unmonitor(const struct nlm_host *host);
-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+struct nsm_handle *nsm_get_handle(const struct net *net,
+ const struct sockaddr *sap,
const size_t salen,
const char *hostname,
const size_t hostname_len);
-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
+struct nsm_handle *nsm_reboot_lookup(const struct net *net,
+ const struct nlm_reboot *info);
void nsm_release(struct nsm_handle *nsm);
/*
diff --git a/kernel/include/linux/lockdep.h b/kernel/include/linux/lockdep.h
index 066ba4157..c57e424d9 100644
--- a/kernel/include/linux/lockdep.h
+++ b/kernel/include/linux/lockdep.h
@@ -2,7 +2,7 @@
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* see Documentation/locking/lockdep-design.txt for more details.
*/
@@ -130,8 +130,8 @@ enum bounce_type {
};
struct lock_class_stats {
- unsigned long contention_point[4];
- unsigned long contending_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
@@ -255,6 +255,7 @@ struct held_lock {
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
+ unsigned int pin_count;
};
/*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
#define lockdep_recursing(tsk) (0)
+#define lockdep_pin_lock(l) do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
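Illustrative use of the new pin API: assert that a lock we hold cannot be dropped behind our back across a callee. The object, its field, and do_work() are hypothetical, not from this patch:

struct example_obj {
	spinlock_t lock;
	/* ... */
};

static void do_work(struct example_obj *obj);	/* assumed callee */

static void example_critical(struct example_obj *obj)
{
	spin_lock(&obj->lock);
	lockdep_pin_lock(&obj->lock);	/* warn if anyone unlocks it */
	do_work(obj);			/* must not release obj->lock */
	lockdep_unpin_lock(&obj->lock);
	spin_unlock(&obj->lock);
}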
diff --git a/kernel/include/linux/lsm_audit.h b/kernel/include/linux/lsm_audit.h
index 1cc89e9df..ffb9c9da4 100644
--- a/kernel/include/linux/lsm_audit.h
+++ b/kernel/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@ struct lsm_network_audit {
} fam;
};
+struct lsm_ioctlop_audit {
+ struct path path;
+ u16 cmd;
+};
+
/* Auxiliary data to use in generating the audit record. */
struct common_audit_data {
char type;
@@ -53,6 +58,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_KMOD 8
#define LSM_AUDIT_DATA_INODE 9
#define LSM_AUDIT_DATA_DENTRY 10
+#define LSM_AUDIT_DATA_IOCTL_OP 11
union {
struct path path;
struct dentry *dentry;
@@ -68,6 +74,7 @@ struct common_audit_data {
} key_struct;
#endif
char *kmod_name;
+ struct lsm_ioctlop_audit *op;
} u;
/* this union contains LSM specific data */
union {
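A sketch of how an LSM might fill the new ioctl record type before handing it to the usual common-audit machinery; the function is hypothetical, and 'file'/'cmd' stand in for the ioctl hook's arguments:

static int example_ioctl_check(struct file *file, u16 cmd)
{
	struct lsm_ioctlop_audit ioctl_ad;
	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_IOCTL_OP;
	ioctl_ad.path = file->f_path;
	ioctl_ad.cmd = cmd;
	ad.u.op = &ioctl_ad;
	/* ... decide, then emit the record via common_lsm_audit() ... */
	return 0;
}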
diff --git a/kernel/include/linux/lsm_hooks.h b/kernel/include/linux/lsm_hooks.h
new file mode 100644
index 000000000..ec3a6bab2
--- /dev/null
+++ b/kernel/include/linux/lsm_hooks.h
@@ -0,0 +1,1890 @@
+/*
+ * Linux Security Module interfaces
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2015 Intel Corporation.
+ * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Due to this file being licensed under the GPL there is controversy over
+ * whether this permits you to write a module that #includes this file
+ * without placing your module under the GPL. Please consult a lawyer for
+ * advice before doing this.
+ *
+ */
+
+#ifndef __LINUX_LSM_HOOKS_H
+#define __LINUX_LSM_HOOKS_H
+
+#include <linux/security.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+
+/**
+ * Security hooks for program execution operations.
+ *
+ * @bprm_set_creds:
+ * Save security information in the bprm->security field, typically based
+ * on information about the bprm->file, for later use by the apply_creds
+ * hook. This hook may also optionally check permissions (e.g. for
+ * transitions between security domains).
+ * This hook may be called multiple times during a single execve, e.g. for
+ * interpreters. The hook can tell whether it has already been called by
+ * checking to see if @bprm->security is non-NULL. If so, then the hook
+ * may decide either to retain the security information saved earlier or
+ * to replace it.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_check_security:
+ * This hook mediates the point when a search for a binary handler will
+ * begin. It allows a check of the @bprm->security value which is set in the
+ * preceding set_creds call. The primary difference from set_creds is
+ * that the argv list and envp list are reliably available in @bprm. This
+ * hook may be called multiple times during a single execve; and in each
+ * pass set_creds is called first.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_committing_creds:
+ * Prepare to install the new security attributes of a process being
+ * transformed by an execve operation, based on the old credentials
+ * pointed to by @current->cred and the information set in @bprm->cred by
+ * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
+ * This hook is a good place to perform state changes on the process such
+ * as closing open file descriptors to which access will no longer be
+ * granted when the attributes are changed. This is called immediately
+ * before commit_creds().
+ * @bprm_committed_creds:
+ * Tidy up after the installation of the new security attributes of a
+ * process being transformed by an execve operation. The new credentials
+ * have, by this point, been set to @current->cred. @bprm points to the
+ * linux_binprm structure. This hook is a good place to perform state
+ * changes on the process such as clearing out non-inheritable signal
+ * state. This is called immediately after commit_creds().
+ * @bprm_secureexec:
+ * Return a boolean value (0 or 1) indicating whether a "secure exec"
+ * is required. The flag is passed in the auxiliary table
+ * on the initial stack to the ELF interpreter to indicate whether libc
+ * should enable secure mode.
+ * @bprm contains the linux_binprm structure.
+ *
+ * Security hooks for filesystem operations.
+ *
+ * @sb_alloc_security:
+ * Allocate and attach a security structure to the sb->s_security field.
+ * The s_security field is initialized to NULL when the structure is
+ * allocated.
+ * @sb contains the super_block structure to be modified.
+ * Return 0 if operation was successful.
+ * @sb_free_security:
+ * Deallocate and clear the sb->s_security field.
+ * @sb contains the super_block structure to be modified.
+ * @sb_statfs:
+ * Check permission before obtaining filesystem statistics for the @mnt
+ * mountpoint.
+ * @dentry is a handle on the superblock for the filesystem.
+ * Return 0 if permission is granted.
+ * @sb_mount:
+ * Check permission before an object specified by @dev_name is mounted on
+ * the mount point named by @nd. For an ordinary mount, @dev_name
+ * identifies a device if the file system type requires a device. For a
+ * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
+ * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
+ * pathname of the object being mounted.
+ * @dev_name contains the name for object being mounted.
+ * @path contains the path for mount point object.
+ * @type contains the filesystem type.
+ * @flags contains the mount flags.
+ * @data contains the filesystem-specific data.
+ * Return 0 if permission is granted.
+ * @sb_copy_data:
+ * Allow mount option data to be copied prior to parsing by the filesystem,
+ * so that the security module can extract security-specific mount
+ * options cleanly (a filesystem may modify the data e.g. with strsep()).
+ * This also allows the original mount data to be stripped of security-
+ * specific options to avoid having to make filesystems aware of them.
+ * @type the type of filesystem being mounted.
+ * @orig the original mount data copied from userspace.
+ * @copy copied data which will be passed to the security module.
+ * Returns 0 if the copy was successful.
+ * @sb_remount:
+ * Extracts security system specific mount options and verifies no changes
+ * are being made to those options.
+ * @sb superblock being remounted
+ * @data contains the filesystem-specific data.
+ * Return 0 if permission is granted.
+ * @sb_umount:
+ * Check permission before the @mnt file system is unmounted.
+ * @mnt contains the mounted file system.
+ * @flags contains the unmount flags, e.g. MNT_FORCE.
+ * Return 0 if permission is granted.
+ * @sb_pivotroot:
+ * Check permission before pivoting the root filesystem.
+ * @old_path contains the path for the new location of the
+ * current root (put_old).
+ * @new_path contains the path for the new root (new_root).
+ * Return 0 if permission is granted.
+ * @sb_set_mnt_opts:
+ * Set the security relevant mount options used for a superblock
+ * @sb the superblock to set security mount options for
+ * @opts binary data structure containing all lsm mount data
+ * @sb_clone_mnt_opts:
+ * Copy all security options from a given superblock to another
+ * @oldsb old superblock which contain information to clone
+ * @newsb new superblock which needs filled in
+ * @sb_parse_opts_str:
+ * Parse a string of security data filling in the opts structure
+ * @options string containing all mount options known by the LSM
+ * @opts binary data structure usable by the LSM
+ * @dentry_init_security:
+ * Compute a context for a dentry as the inode is not yet available
+ * since NFSv4 has no label backed by an EA anyway.
+ * @dentry dentry to use in calculating the context.
+ * @mode mode used to determine resource type.
+ * @name name of the last path component used to create file
+ * @ctx pointer to place the pointer to the resulting context in.
+ * @ctxlen point to place the length of the resulting context.
+ *
+ *
+ * Security hooks for inode operations.
+ *
+ * @inode_alloc_security:
+ * Allocate and attach a security structure to @inode->i_security. The
+ * i_security field is initialized to NULL when the inode structure is
+ * allocated.
+ * @inode contains the inode structure.
+ * Return 0 if operation was successful.
+ * @inode_free_security:
+ * @inode contains the inode structure.
+ * Deallocate the inode security structure and set @inode->i_security to
+ * NULL.
+ * @inode_init_security:
+ * Obtain the security attribute name suffix and value to set on a newly
+ * created inode and set up the incore security field for the new inode.
+ * This hook is called by the fs code as part of the inode creation
+ * transaction and provides for atomic labeling of the inode, unlike
+ * the post_create/mkdir/... hooks called by the VFS. The hook function
+ * is expected to allocate the name and value via kmalloc, with the caller
+ * being responsible for calling kfree after using them.
+ * If the security module does not use security attributes or does
+ * not wish to put a security attribute on this particular inode,
+ * then it should return -EOPNOTSUPP to skip this processing.
+ * @inode contains the inode structure of the newly created inode.
+ * @dir contains the inode structure of the parent directory.
+ * @qstr contains the last path component of the new object
+ * @name will be set to the allocated name suffix (e.g. selinux).
+ * @value will be set to the allocated attribute value.
+ * @len will be set to the length of the value.
+ * Returns 0 if @name and @value have been successfully set,
+ * -EOPNOTSUPP if no security attribute is needed, or
+ * -ENOMEM on memory allocation failure.
+ * @inode_create:
+ * Check permission to create a regular file.
+ * @dir contains inode structure of the parent of the new file.
+ * @dentry contains the dentry structure for the file to be created.
+ * @mode contains the file mode of the file to be created.
+ * Return 0 if permission is granted.
+ * @inode_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing
+ * link to the file.
+ * @dir contains the inode structure of the parent directory
+ * of the new link.
+ * @new_dentry contains the dentry structure for the new link.
+ * Return 0 if permission is granted.
+ * @path_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing link
+ * to the file.
+ * @new_dir contains the path structure of the parent directory of
+ * the new link.
+ * @new_dentry contains the dentry structure for the new link.
+ * Return 0 if permission is granted.
+ * @inode_unlink:
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the inode structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
+ * Return 0 if permission is granted.
+ * @path_unlink:
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the path structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
+ * Return 0 if permission is granted.
+ * @inode_symlink:
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the inode structure of parent directory of
+ * the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @path_symlink:
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the path structure of parent directory of
+ * the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @inode_mkdir:
+ * Check permissions to create a new directory in the existing directory
+ * associated with inode structure @dir.
+ * @dir contains the inode structure of parent of the directory
+ * to be created.
+ * @dentry contains the dentry structure of new directory.
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @path_mkdir:
+ * Check permissions to create a new directory in the existing directory
+ * associated with path structure @path.
+ * @dir contains the path structure of parent of the directory
+ * to be created.
+ * @dentry contains the dentry structure of new directory.
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @inode_rmdir:
+ * Check the permission to remove a directory.
+ * @dir contains the inode structure of parent of the directory
+ * to be removed.
+ * @dentry contains the dentry structure of directory to be removed.
+ * Return 0 if permission is granted.
+ * @path_rmdir:
+ * Check the permission to remove a directory.
+ * @dir contains the path structure of parent of the directory to be
+ * removed.
+ * @dentry contains the dentry structure of directory to be removed.
+ * Return 0 if permission is granted.
+ * @inode_mknod:
+ * Check permissions when creating a special file (or a socket or a fifo
+ * file created via the mknod system call). Note that if mknod operation
+ * is being done for a regular file, then the create hook will be called
+ * and not this hook.
+ * @dir contains the inode structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
+ * @mode contains the mode of the new file.
+ * @dev contains the device number.
+ * Return 0 if permission is granted.
+ * @path_mknod:
+ * Check permissions when creating a file. Note that this hook is called
+ * even if mknod operation is being done for a regular file.
+ * @dir contains the path structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
+ * @mode contains the mode of the new file.
+ * @dev contains the undecoded device number. Use new_decode_dev() to get
+ * the decoded device number.
+ * Return 0 if permission is granted.
+ * @inode_rename:
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the inode structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
+ * @new_dir contains the inode structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
+ * Return 0 if permission is granted.
+ * @path_rename:
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the path structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
+ * @new_dir contains the path structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
+ * Return 0 if permission is granted.
+ * @path_chmod:
+ * Check for permission to change DAC's permission of a file or directory.
+ * @dentry contains the dentry structure.
+ * @mnt contains the vfsmnt structure.
+ * @mode contains DAC's mode.
+ * Return 0 if permission is granted.
+ * @path_chown:
+ * Check for permission to change owner/group of a file or directory.
+ * @path contains the path structure.
+ * @uid contains new owner's ID.
+ * @gid contains new group's ID.
+ * Return 0 if permission is granted.
+ * @path_chroot:
+ * Check for permission to change root directory.
+ * @path contains the path structure.
+ * Return 0 if permission is granted.
+ * @inode_readlink:
+ * Check the permission to read the symbolic link.
+ * @dentry contains the dentry structure for the file link.
+ * Return 0 if permission is granted.
+ * @inode_follow_link:
+ * Check permission to follow a symbolic link when looking up a pathname.
+ * @dentry contains the dentry structure for the link.
+ * @inode contains the inode, which itself is not stable in RCU-walk.
+ * @rcu indicates whether we are in RCU-walk mode.
+ * Return 0 if permission is granted.
+ * @inode_permission:
+ * Check permission before accessing an inode. This hook is called by the
+ * existing Linux permission function, so a security module can use it to
+ * provide additional checking for existing Linux permission checks.
+ * Notice that this hook is called when a file is opened (as well as many
+ * other operations), whereas the file_security_ops permission hook is
+ * called when the actual read/write operations are performed.
+ * @inode contains the inode structure to check.
+ * @mask contains the permission mask.
+ * Return 0 if permission is granted.
+ * @inode_setattr:
+ * Check permission before setting file attributes. Note that the kernel
+ * call to notify_change is performed from several locations, whenever
+ * file attributes change (such as when a file is truncated, chown/chmod
+ * operations, transferring disk quotas, etc).
+ * @dentry contains the dentry structure for the file.
+ * @attr is the iattr structure containing the new file attributes.
+ * Return 0 if permission is granted.
+ * @path_truncate:
+ * Check permission before truncating a file.
+ * @path contains the path structure for the file.
+ * Return 0 if permission is granted.
+ * @inode_getattr:
+ * Check permission before obtaining file attributes.
+ * @mnt is the vfsmount where the dentry was looked up
+ * @dentry contains the dentry structure for the file.
+ * Return 0 if permission is granted.
+ * @inode_setxattr:
+ * Check permission before setting the extended attributes
+ * @value identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_post_setxattr:
+ * Update inode security field after successful setxattr operation.
+ * @value identified by @name for @dentry.
+ * @inode_getxattr:
+ * Check permission before obtaining the extended attributes
+ * identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_listxattr:
+ * Check permission before obtaining the list of extended attribute
+ * names for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_removexattr:
+ * Check permission before removing the extended attribute
+ * identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_getsecurity:
+ * Retrieve a copy of the extended attribute representation of the
+ * security label associated with @name for @inode via @buffer. Note that
+ * @name is the remainder of the attribute name after the security prefix
+ * has been removed. @alloc is used to specify if the call should return a
+ * value via the buffer or just the value length. Return size of buffer
+ * on success.
+ * @inode_setsecurity:
+ * Set the security label associated with @name for @inode from the
+ * extended attribute value @value. @size indicates the size of the
+ * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
+ * Note that @name is the remainder of the attribute name after the
+ * security. prefix has been removed.
+ * Return 0 on success.
+ * @inode_listsecurity:
+ * Copy the extended attribute names for the security labels
+ * associated with @inode into @buffer. The maximum size of @buffer
+ * is specified by @buffer_size. @buffer may be NULL to request
+ * the size of the buffer required.
+ * Returns number of bytes used/required on success.
+ * @inode_need_killpriv:
+ * Called when an inode has been changed.
+ * @dentry is the dentry being changed.
+ * Return <0 on error to abort the inode change operation.
+ * Return 0 if inode_killpriv does not need to be called.
+ * Return >0 if inode_killpriv does need to be called.
+ * @inode_killpriv:
+ * The setuid bit is being removed. Remove similar security labels.
+ * Called with the dentry->d_inode->i_mutex held.
+ * @dentry is the dentry being changed.
+ * Return 0 on success. If error is returned, then the operation
+ * causing setuid bit removal is failed.
+ * @inode_getsecid:
+ * Get the secid associated with the node.
+ * @inode contains a pointer to the inode.
+ * @secid contains a pointer to the location where the result will be saved.
+ * In case of failure, @secid will be set to zero.
+ *
+ * Security hooks for file operations
+ *
+ * @file_permission:
+ * Check file permissions before accessing an open file. This hook is
+ * called by various operations that read or write files. A security
+ * module can use this hook to perform additional checking on these
+ * operations, e.g. to revalidate permissions on use to support privilege
+ * bracketing or policy changes. Notice that this hook is used when the
+ * actual read/write operations are performed, whereas the
+ * inode_security_ops hook is called when a file is opened (as well as
+ * many other operations).
+ * Caveat: Although this hook can be used to revalidate permissions for
+ * various system call operations that read or write files, it does not
+ * address the revalidation of permissions for memory-mapped files.
+ * Security modules must handle this separately if they need such
+ * revalidation.
+ * @file contains the file structure being accessed.
+ * @mask contains the requested permissions.
+ * Return 0 if permission is granted.
+ * @file_alloc_security:
+ * Allocate and attach a security structure to the file->f_security field.
+ * The security field is initialized to NULL when the structure is first
+ * created.
+ * @file contains the file structure to secure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @file_free_security:
+ * Deallocate and free any security structures stored in file->f_security.
+ * @file contains the file structure being modified.
+ * @file_ioctl:
+ * @file contains the file structure.
+ * @cmd contains the operation to perform.
+ * @arg contains the operational arguments.
+ * Check permission for an ioctl operation on @file. Note that @arg
+ * sometimes represents a user space pointer; in other cases, it may be a
+ * simple integer value. When @arg represents a user space pointer, it
+ * should never be used by the security module.
+ * Return 0 if permission is granted.
+ * @mmap_addr:
+ * Check permissions for a mmap operation at @addr.
+ * @addr contains virtual address that will be used for the operation.
+ * Return 0 if permission is granted.
+ * @mmap_file:
+ * Check permissions for a mmap operation. The @file may be NULL, e.g.
+ * if mapping anonymous memory.
+ * @file contains the file structure for file to map (may be NULL).
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ * @file_mprotect:
+ * Check permissions before changing memory access permissions.
+ * @vma contains the memory region to modify.
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * Return 0 if permission is granted.
+ * @file_lock:
+ * Check permission before performing file locking operations.
+ * Note: this hook mediates both flock and fcntl style locks.
+ * @file contains the file structure.
+ * @cmd contains the posix-translated lock operation to perform
+ * (e.g. F_RDLCK, F_WRLCK).
+ * Return 0 if permission is granted.
+ * @file_fcntl:
+ * Check permission before allowing the file operation specified by @cmd
+ * from being performed on the file @file. Note that @arg sometimes
+ * represents a user space pointer; in other cases, it may be a simple
+ * integer value. When @arg represents a user space pointer, it should
+ * never be used by the security module.
+ * @file contains the file structure.
+ * @cmd contains the operation to be performed.
+ * @arg contains the operational arguments.
+ * Return 0 if permission is granted.
+ * @file_set_fowner:
+ * Save owner security information (typically from current->security) in
+ * file->f_security for later use by the send_sigiotask hook.
+ * @file contains the file structure to update.
+ * Return 0 on success.
+ * @file_send_sigiotask:
+ * Check permission for the file owner @fown to send SIGIO or SIGURG to the
+ * process @tsk. Note that this hook is sometimes called from interrupt.
+ * Note that the fown_struct, @fown, is never outside the context of a
+ * struct file, so the file structure (and associated security information)
+ * can always be obtained:
+ * container_of(fown, struct file, f_owner)
+ * @tsk contains the structure of task receiving signal.
+ * @fown contains the file owner information.
+ * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
+ * Return 0 if permission is granted.
+ * @file_receive:
+ * This hook allows security modules to control the ability of a process
+ * to receive an open file descriptor via socket IPC.
+ * @file contains the file structure being received.
+ * Return 0 if permission is granted.
+ * @file_open:
+ * Save open-time permission checking state for later use upon
+ * file_permission, and recheck access if anything has changed
+ * since inode_permission.
+ *
+ * Security hooks for task operations.
+ *
+ * @task_create:
+ * Check permission before creating a child process. See the clone(2)
+ * manual page for definitions of the @clone_flags.
+ * @clone_flags contains the flags indicating what should be shared.
+ * Return 0 if permission is granted.
+ * @task_free:
+ * @task task being freed
+ * Handle release of task-related resources. (Note that this can be called
+ * from interrupt context.)
+ * @cred_alloc_blank:
+ * @cred points to the credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Only allocate sufficient memory and attach to @cred such that
+ * cred_transfer() will not get ENOMEM.
+ * @cred_free:
+ * @cred points to the credentials.
+ * Deallocate and clear the cred->security field in a set of credentials.
+ * @cred_prepare:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Prepare a new set of credentials by copying the data from the old set.
+ * @cred_transfer:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * Transfer data from original creds to new creds
+ * @kernel_act_as:
+ * Set the credentials for a kernel service to act as (subjective context).
+ * @new points to the credentials to be modified.
+ * @secid specifies the security ID to be set
+ * The current task must be the one that nominated @secid.
+ * Return 0 if successful.
+ * @kernel_create_files_as:
+ * Set the file creation context in a set of credentials to be the same as
+ * the objective context of the specified inode.
+ * @new points to the credentials to be modified.
+ * @inode points to the inode to use as a reference.
+ * The current task must be the one that nominated @inode.
+ * Return 0 if successful.
+ * @kernel_fw_from_file:
+ * Load firmware from userspace (not called for built-in firmware).
+ * @file contains the file structure pointing to the file containing
+ * the firmware to load. This argument will be NULL if the firmware
+ * was loaded via the uevent-triggered blob-based interface exposed
+ * by CONFIG_FW_LOADER_USER_HELPER.
+ * @buf pointer to buffer containing firmware contents.
+ * @size length of the firmware contents.
+ * Return 0 if permission is granted.
+ * @kernel_module_request:
+ * Ability to trigger the kernel to automatically upcall to userspace for
+ * userspace to load a kernel module with the given name.
+ * @kmod_name name of the module requested by the kernel
+ * Return 0 if successful.
+ * @kernel_module_from_file:
+ * Load a kernel module from userspace.
+ * @file contains the file structure pointing to the file containing
+ * the kernel module to load. If the module is being loaded from a blob,
+ * this argument will be NULL.
+ * Return 0 if permission is granted.
+ * @task_fix_setuid:
+ * Update the module's state after setting one or more of the user
+ * identity attributes of the current process. The @flags parameter
+ * indicates which of the set*uid system calls invoked this hook.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * @flags contains one of the LSM_SETID_* values.
+ * Return 0 on success.
+ * @task_setpgid:
+ * Check permission before setting the process group identifier of the
+ * process @p to @pgid.
+ * @p contains the task_struct for process being modified.
+ * @pgid contains the new pgid.
+ * Return 0 if permission is granted.
+ * @task_getpgid:
+ * Check permission before getting the process group identifier of the
+ * process @p.
+ * @p contains the task_struct for the process.
+ * Return 0 if permission is granted.
+ * @task_getsid:
+ * Check permission before getting the session identifier of the process
+ * @p.
+ * @p contains the task_struct for the process.
+ * Return 0 if permission is granted.
+ * @task_getsecid:
+ * Retrieve the security identifier of the process @p.
+ * @p contains the task_struct for the process; the result is placed in @secid.
+ * In case of failure, @secid will be set to zero.
+ *
+ * @task_setnice:
+ * Check permission before setting the nice value of @p to @nice.
+ * @p contains the task_struct of process.
+ * @nice contains the new nice value.
+ * Return 0 if permission is granted.
+ * @task_setioprio:
+ * Check permission before setting the ioprio value of @p to @ioprio.
+ * @p contains the task_struct of process.
+ * @ioprio contains the new ioprio value.
+ * Return 0 if permission is granted.
+ * @task_getioprio:
+ * Check permission before getting the ioprio value of @p.
+ * @p contains the task_struct of process.
+ * Return 0 if permission is granted.
+ * @task_setrlimit:
+ * Check permission before setting the resource limits of the current
+ * process for @resource to @new_rlim. The old resource limit values can
+ * be examined by dereferencing (current->signal->rlim + resource).
+ * @resource contains the resource whose limit is being set.
+ * @new_rlim contains the new limits for @resource.
+ * Return 0 if permission is granted.
+ * @task_setscheduler:
+ * Check permission before setting scheduling policy and/or parameters of
+ * process @p based on @policy and @lp.
+ * @p contains the task_struct for process.
+ * @policy contains the scheduling policy.
+ * @lp contains the scheduling parameters.
+ * Return 0 if permission is granted.
+ * @task_getscheduler:
+ * Check permission before obtaining scheduling information for process
+ * @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_movememory:
+ * Check permission before moving memory owned by process @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_kill:
+ * Check permission before sending signal @sig to @p. @info can be NULL,
+ * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
+ * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
+ * from the kernel and should typically be permitted.
+ * SIGIO signals are handled separately by the send_sigiotask hook in
+ * file_security_ops.
+ * @p contains the task_struct for process.
+ * @info contains the signal information.
+ * @sig contains the signal value.
+ * @secid contains the sid of the process where the signal originated
+ * Return 0 if permission is granted.
+ * @task_wait:
+ * Check permission before allowing a process to reap a child process @p
+ * and collect its status information.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_prctl:
+ * Check permission before performing a process control operation on the
+ * current process.
+ * @option contains the operation.
+ * @arg2 contains an argument.
+ * @arg3 contains an argument.
+ * @arg4 contains an argument.
+ * @arg5 contains an argument.
+ * Return -ENOSYS if no-one wanted to handle this op, any other value to
+ * cause prctl() to return immediately with that value.
+ * @task_to_inode:
+ * Set the security attributes for an inode based on an associated task's
+ * security attributes, e.g. for /proc/pid inodes.
+ * @p contains the task_struct for the task.
+ * @inode contains the inode structure for the inode.
+ *
+ * Security hooks for Netlink messaging.
+ *
+ * @netlink_send:
+ * Save security information for a netlink message so that permission
+ * checking can be performed when the message is processed. The security
+ * information can be saved using the eff_cap field of the
+ * netlink_skb_parms structure. Also may be used to provide fine
+ * grained control over message transmission.
+ * @sk associated sock of task sending the message.
+ * @skb contains the sk_buff structure for the netlink message.
+ * Return 0 if the information was successfully saved and message
+ * is allowed to be transmitted.
+ *
+ * Security hooks for Unix domain networking.
+ *
+ * @unix_stream_connect:
+ * Check permissions before establishing a Unix domain stream connection
+ * between @sock and @other.
+ * @sock contains the sock structure.
+ * @other contains the peer sock structure.
+ * @newsk contains the new sock structure.
+ * Return 0 if permission is granted.
+ * @unix_may_send:
+ * Check permissions before connecting or sending datagrams from @sock to
+ * @other.
+ * @sock contains the socket structure.
+ * @other contains the peer socket structure.
+ * Return 0 if permission is granted.
+ *
+ * The @unix_stream_connect and @unix_may_send hooks were necessary because
+ * Linux provides an alternative to the conventional file name space for Unix
+ * domain sockets. Whereas binding and connecting to sockets in the file name
+ * space is mediated by the typical file permissions (and caught by the mknod
+ * and permission hooks in inode_security_ops), binding and connecting to
+ * sockets in the abstract name space is completely unmediated. Sufficient
+ * control of Unix domain sockets in the abstract name space isn't possible
+ * using only the socket layer hooks, since we need to know the actual target
+ * socket, which is not looked up until we are inside the af_unix code.
+ *
+ * Security hooks for socket operations.
+ *
+ * @socket_create:
+ * Check permissions prior to creating a new socket.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * Return 0 if permission is granted.
+ * @socket_post_create:
+ * This hook allows a module to update or allocate a per-socket security
+ * structure. Note that the security field was not added directly to the
+ * socket structure, but rather, the socket security information is stored
+ * in the associated inode. Typically, the inode alloc_security hook will
+ * allocate and attach security information to
+ * sock->inode->i_security. This hook may be used to update the
+ * sock->inode->i_security field with additional information that wasn't
+ * available when the inode was allocated.
+ * @sock contains the newly created socket structure.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * @socket_bind:
+ * Check permission before socket protocol layer bind operation is
+ * performed and the socket @sock is bound to the address specified in the
+ * @address parameter.
+ * @sock contains the socket structure.
+ * @address contains the address to bind to.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_connect:
+ * Check permission before socket protocol layer connect operation
+ * attempts to connect socket @sock to a remote address, @address.
+ * @sock contains the socket structure.
+ * @address contains the address of remote endpoint.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_listen:
+ * Check permission before socket protocol layer listen operation.
+ * @sock contains the socket structure.
+ * @backlog contains the maximum length for the pending connection queue.
+ * Return 0 if permission is granted.
+ * @socket_accept:
+ * Check permission before accepting a new connection. Note that the new
+ * socket, @newsock, has been created and some information copied to it,
+ * but the accept operation has not actually been performed.
+ * @sock contains the listening socket structure.
+ * @newsock contains the newly created server socket for connection.
+ * Return 0 if permission is granted.
+ * @socket_sendmsg:
+ * Check permission before transmitting a message to another socket.
+ * @sock contains the socket structure.
+ * @msg contains the message to be transmitted.
+ * @size contains the size of message.
+ * Return 0 if permission is granted.
+ * @socket_recvmsg:
+ * Check permission before receiving a message from a socket.
+ * @sock contains the socket structure.
+ * @msg contains the message structure.
+ * @size contains the size of message structure.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ * @socket_getsockname:
+ * Check permission before the local address (name) of the socket object
+ * @sock is retrieved.
+ * @sock contains the socket structure.
+ * Return 0 if permission is granted.
+ * @socket_getpeername:
+ * Check permission before the remote address (name) of a socket object
+ * @sock is retrieved.
+ * @sock contains the socket structure.
+ * Return 0 if permission is granted.
+ * @socket_getsockopt:
+ * Check permissions before retrieving the options associated with socket
+ * @sock.
+ * @sock contains the socket structure.
+ * @level contains the protocol level to retrieve option from.
+ * @optname contains the name of option to retrieve.
+ * Return 0 if permission is granted.
+ * @socket_setsockopt:
+ * Check permissions before setting the options associated with socket
+ * @sock.
+ * @sock contains the socket structure.
+ * @level contains the protocol level to set options for.
+ * @optname contains the name of the option to set.
+ * Return 0 if permission is granted.
+ * @socket_shutdown:
+ * Check permission before all or part of a connection on the socket
+ * @sock is shut down.
+ * @sock contains the socket structure.
+ * @how contains the flag indicating how future sends and receives
+ * are handled.
+ * Return 0 if permission is granted.
+ * @socket_sock_rcv_skb:
+ * Check permissions on incoming network packets. This hook is distinct
+ * from Netfilter's IP input hooks since it is the first time that the
+ * incoming sk_buff @skb has been associated with a particular socket, @sk.
+ * Must not sleep inside this hook because some callers hold spinlocks.
+ * @sk contains the sock (not socket) associated with the incoming sk_buff.
+ * @skb contains the incoming network data.
+ * @socket_getpeersec_stream:
+ * This hook allows the security module to provide peer socket security
+ * state for unix or connected tcp sockets to userspace via getsockopt
+ * SO_PEERSEC. For tcp sockets this can be meaningful if the
+ * socket is associated with an ipsec SA.
+ * @sock is the local socket.
+ * @optval userspace memory where the security state is to be copied.
+ * @optlen userspace int where the module should copy the actual length
+ * of the security state.
+ * @len as input is the maximum length to copy to userspace provided
+ * by the caller.
+ * Return 0 if all is well, otherwise, typical getsockopt return
+ * values.
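+ * For example (an illustrative userspace sketch):
+ *
+ *	char label[256];
+ *	socklen_t len = sizeof(label);
+ *	getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len);
+ *
+ * On success, the first len bytes of label hold the peer's context.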
+ * @socket_getpeersec_dgram:
+ * This hook allows the security module to provide peer socket security
+ * state for udp sockets on a per-packet basis to userspace via
+ * getsockopt SO_PEERSEC. The application must first have indicated
+ * the IP_PASSSEC option via setsockopt. It can then retrieve the
+ * security state returned by this hook for a packet via the SCM_SECURITY
+ * ancillary message type.
+ * @skb is the skbuff for the packet being queried
+ * @secdata is a pointer to a buffer in which to copy the security data
+ * @seclen is the maximum length for @secdata
+ * Return 0 on success, error on failure.
+ * @sk_alloc_security:
+ * Allocate and attach a security structure to the sk->sk_security field,
+ * which is used to copy security attributes between local stream sockets.
+ * @sk_free_security:
+ * Deallocate security structure.
+ * @sk_clone_security:
+ * Clone/copy security structure.
+ * @sk_getsecid:
+ * Retrieve the LSM-specific secid for the sock to enable caching
+ * of network authorizations.
+ * @sock_graft:
+ * Sets the socket's isec sid to the sock's sid.
+ * @inet_conn_request:
+ * Sets the openreq's sid to socket's sid with MLS portion taken
+ * from peer sid.
+ * @inet_csk_clone:
+ * Sets the new child socket's sid to the openreq sid.
+ * @inet_conn_established:
+ * Sets the connection's peersid to the secmark on skb.
+ * @secmark_relabel_packet:
+ * Check if the process should be allowed to relabel packets to
+ * the given secid.
+ * @secmark_refcount_inc:
+ * Tells the LSM to increment the number of secmark labeling rules loaded.
+ * @secmark_refcount_dec:
+ * Tells the LSM to decrement the number of secmark labeling rules loaded.
+ * @req_classify_flow:
+ * Sets the flow's sid to the openreq sid.
+ * @tun_dev_alloc_security:
+ * This hook allows a module to allocate a security structure for a TUN
+ * device.
+ * @security pointer to a security structure pointer.
+ * Return 0 on success, a negative value on failure.
+ * @tun_dev_free_security:
+ * This hook allows a module to free the security structure for a TUN
+ * device.
+ * @security pointer to the TUN device's security structure
+ * @tun_dev_create:
+ * Check permissions prior to creating a new TUN device.
+ * @tun_dev_attach_queue:
+ * Check permissions prior to attaching to a TUN device queue.
+ * @security pointer to the TUN device's security structure.
+ * @tun_dev_attach:
+ * This hook can be used by the module to update any security state
+ * associated with the TUN device's sock structure.
+ * @sk contains the existing sock structure.
+ * @security pointer to the TUN device's security structure.
+ * @tun_dev_open:
+ * This hook can be used by the module to update any security state
+ * associated with the TUN device's security structure.
+ * @security pointer to the TUN device's security structure.
+ *
+ * Security hooks for XFRM operations.
+ *
+ * @xfrm_policy_alloc_security:
+ * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
+ * Database used by the XFRM system.
+ * @sec_ctx contains the security context information being provided by
+ * the user-level policy update program (e.g., setkey).
+ * Allocate a security structure to the xp->security field; the security
+ * field is initialized to NULL when the xfrm_policy is allocated.
+ * Return 0 if the operation was successful (memory to allocate, legal
+ * context).
+ * @gfp specifies the context for the allocation.
+ * @xfrm_policy_clone_security:
+ * @old_ctx contains an existing xfrm_sec_ctx.
+ * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
+ * Allocate a security structure in new_ctxp that contains the
+ * information from the old_ctx structure.
+ * Return 0 if operation was successful (memory to allocate).
+ * @xfrm_policy_free_security:
+ * @ctx contains the xfrm_sec_ctx
+ * Deallocate xp->security.
+ * @xfrm_policy_delete_security:
+ * @ctx contains the xfrm_sec_ctx.
+ * Authorize deletion of xp->security.
+ * @xfrm_state_alloc:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @sec_ctx contains the security context information being provided by
+ * the user-level SA generation program (e.g., setkey or racoon).
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to sec_ctx. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @polsec contains the policy's security context.
+ * @secid contains the secid from which to take the mls portion of the
+ * context.
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to secid. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_free_security:
+ * @x contains the xfrm_state.
+ * Deallocate x->security.
+ * @xfrm_state_delete_security:
+ * @x contains the xfrm_state.
+ * Authorize deletion of x->security.
+ * @xfrm_policy_lookup:
+ * @ctx contains the xfrm_sec_ctx for which the access control is being
+ * checked.
+ * @fl_secid contains the flow security label that is used to authorize
+ * access to the policy xp.
+ * @dir contains the direction of the flow (input or output).
+ * Check permission when a flow selects a xfrm_policy for processing
+ * XFRMs on a packet. The hook is called when selecting either a
+ * per-socket policy or a generic xfrm policy.
+ * Return 0 if permission is granted, -ESRCH otherwise, or -errno
+ * on other errors.
+ * @xfrm_state_pol_flow_match:
+ * @x contains the state to match.
+ * @xp contains the policy to check for a match.
+ * @fl contains the flow to check for a match.
+ * Return 1 if there is a match.
+ * @xfrm_decode_session:
+ * @skb points to skb to decode.
+ * @secid points to the flow key secid to set.
+ * @ckall says if all xfrms used should be checked for same secid.
+ * Return 0 if ckall is zero or all xfrms used have the same secid.
+ *
+ * Security hooks affecting all Key Management operations
+ *
+ * @key_alloc:
+ * Permit allocation of a key and assign security data. Note that the
+ * key does not have a serial number assigned at this point.
+ * @key points to the key.
+ * @flags is the allocation flags
+ * Return 0 if permission is granted, -ve error otherwise.
+ * @key_free:
+ * Notification of destruction; free security data.
+ * @key points to the key.
+ * No return value.
+ * @key_permission:
+ * See whether a specific operational right is granted to a process on a
+ * key.
+ * @key_ref refers to the key (key pointer + possession attribute bit).
+ * @cred points to the credentials to provide the context against which to
+ * evaluate the security data on the key.
+ * @perm describes the combination of permissions required of this key.
+ * Return 0 if permission is granted, -ve error otherwise.
+ * @key_getsecurity:
+ * Get a textual representation of the security context attached to a key
+ * for the purposes of honouring KEYCTL_GETSECURITY. This function
+ * allocates the storage for the NUL-terminated string and the caller
+ * should free it.
+ * @key points to the key to be queried.
+ * @_buffer points to a pointer that should be set to point to the
+ * resulting string (unless no label is present or an error occurs).
+ * Return the length of the string (including terminating NUL) or -ve if
+ * an error.
+ * May also return 0 (and a NULL buffer pointer) if there is no label.
+ *
+ * Security hooks affecting all System V IPC operations.
+ *
+ * @ipc_permission:
+ * Check permissions for access to IPC.
+ * @ipcp contains the kernel IPC permission structure.
+ * @flag contains the desired (requested) permission set.
+ * Return 0 if permission is granted.
+ * @ipc_getsecid:
+ * Get the secid associated with the ipc object.
+ * @ipcp contains the kernel IPC permission structure.
+ * @secid contains a pointer to the location where result will be saved.
+ * In case of failure, @secid will be set to zero.
+ *
+ * Security hooks for individual messages held in System V IPC message queues
+ * @msg_msg_alloc_security:
+ * Allocate and attach a security structure to the msg->security field.
+ * The security field is initialized to NULL when the structure is first
+ * created.
+ * @msg contains the message structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @msg_msg_free_security:
+ * Deallocate the security structure for this message.
+ * @msg contains the message structure to be modified.
+ *
+ * Security hooks for System V IPC Message Queues
+ *
+ * @msg_queue_alloc_security:
+ * Allocate and attach a security structure to the
+ * msq->q_perm.security field. The security field is initialized to
+ * NULL when the structure is first created.
+ * @msq contains the message queue structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @msg_queue_free_security:
+ * Deallocate security structure for this message queue.
+ * @msq contains the message queue structure to be modified.
+ * @msg_queue_associate:
+ * Check permission when a message queue is requested through the
+ * msgget system call. This hook is only called when returning the
+ * message queue identifier for an existing message queue, not when a
+ * new message queue is created.
+ * @msq contains the message queue to act upon.
+ * @msqflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgctl:
+ * Check permission when a message control operation specified by @cmd
+ * is to be performed on the message queue @msq.
+ * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
+ * @msq contains the message queue to act upon. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgsnd:
+ * Check permission before a message, @msg, is enqueued on the message
+ * queue, @msq.
+ * @msq contains the message queue to send message to.
+ * @msg contains the message to be enqueued.
+ * @msqflg contains operational flags.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgrcv:
+ * Check permission before a message, @msg, is removed from the message
+ * queue, @msq. The @target task structure contains a pointer to the
+ * process that will be receiving the message (not equal to the current
+ * process when inline receives are being performed).
+ * @msq contains the message queue to retrieve message from.
+ * @msg contains the message destination.
+ * @target contains the task structure for recipient process.
+ * @type contains the type of message requested.
+ * @mode contains the operational flags.
+ * Return 0 if permission is granted.
+ *
+ * Security hooks for System V Shared Memory Segments
+ *
+ * @shm_alloc_security:
+ * Allocate and attach a security structure to the shp->shm_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @shp contains the shared memory structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @shm_free_security:
+ * Deallocate the security struct for this memory segment.
+ * @shp contains the shared memory structure to be modified.
+ * @shm_associate:
+ * Check permission when a shared memory region is requested through the
+ * shmget system call. This hook is only called when returning the shared
+ * memory region identifier for an existing region, not when a new shared
+ * memory region is created.
+ * @shp contains the shared memory structure to be modified.
+ * @shmflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @shm_shmctl:
+ * Check permission when a shared memory control operation specified by
+ * @cmd is to be performed on the shared memory region @shp.
+ * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
+ * @shp contains shared memory structure to be modified.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @shm_shmat:
+ * Check permissions prior to allowing the shmat system call to attach the
+ * shared memory segment @shp to the data segment of the calling process.
+ * The attaching address is specified by @shmaddr.
+ * @shp contains the shared memory structure to be modified.
+ * @shmaddr contains the address to attach memory region to.
+ * @shmflg contains the operational flags.
+ * Return 0 if permission is granted.
+ *
+ * Security hooks for System V Semaphores
+ *
+ * @sem_alloc_security:
+ * Allocate and attach a security structure to the sma->sem_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @sma contains the semaphore structure
+ * Return 0 if operation was successful and permission is granted.
+ * @sem_free_security:
+ * Deallocate the security structure for this semaphore.
+ * @sma contains the semaphore structure.
+ * @sem_associate:
+ * Check permission when a semaphore is requested through the semget
+ * system call. This hook is only called when returning the semaphore
+ * identifier for an existing semaphore, not when a new one must be
+ * created.
+ * @sma contains the semaphore structure.
+ * @semflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @sem_semctl:
+ * Check permission when a semaphore operation specified by @cmd is to be
+ * performed on the semaphore @sma. The @sma may be NULL, e.g. for
+ * IPC_INFO or SEM_INFO.
+ * @sma contains the semaphore structure. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @sem_semop:
+ * Check permissions before performing operations on members of the
+ * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
+ * may be modified.
+ * @sma contains the semaphore structure.
+ * @sops contains the operations to perform.
+ * @nsops contains the number of operations to perform.
+ * @alter contains the flag indicating whether changes are to be made.
+ * Return 0 if permission is granted.
+ *
+ * @binder_set_context_mgr:
+ * Check whether @mgr is allowed to be the binder context manager.
+ * @mgr contains the task_struct for the task being registered.
+ * Return 0 if permission is granted.
+ * @binder_transaction:
+ * Check whether @from is allowed to invoke a binder transaction call
+ * to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_binder:
+ * Check whether @from is allowed to transfer a binder reference to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_file:
+ * Check whether @from is allowed to transfer @file to @to.
+ * @from contains the task_struct for the sending task.
+ * @file contains the struct file being transferred.
+ * @to contains the task_struct for the receiving task.
+ *
+ * @ptrace_access_check:
+ * Check permission before allowing the current process to trace the
+ * @child process.
+ * Security modules may also want to perform a process tracing check
+ * during an execve in the bprm_set_creds hook of binprm_security_ops
+ * if the process is being traced and its security attributes would be
+ * changed by the execve.
+ * @child contains the task_struct structure for the target process.
+ * @mode contains the PTRACE_MODE flags indicating the form of access.
+ * Return 0 if permission is granted.
+ * @ptrace_traceme:
+ * Check that the @parent process has sufficient permission to trace the
+ * current process before allowing the current process to present itself
+ * to the @parent process for tracing.
+ * @parent contains the task_struct structure for debugger process.
+ * Return 0 if permission is granted.
+ * @capget:
+ * Get the @effective, @inheritable, and @permitted capability sets for
+ * the @target process. The hook may also perform permission checking to
+ * determine if the current process is allowed to see the capability sets
+ * of the @target process.
+ * @target contains the task_struct structure for target process.
+ * @effective contains the effective capability set.
+ * @inheritable contains the inheritable capability set.
+ * @permitted contains the permitted capability set.
+ * Return 0 if the capability sets were successfully obtained.
+ * @capset:
+ * Set the @effective, @inheritable, and @permitted capability sets for
+ * the current process.
+ * @new contains the new credentials structure for target process.
+ * @old contains the current credentials structure for target process.
+ * @effective contains the effective capability set.
+ * @inheritable contains the inheritable capability set.
+ * @permitted contains the permitted capability set.
+ * Return 0 and update @new if permission is granted.
+ * @capable:
+ * Check whether the process has the @cap capability in the indicated
+ * credentials.
+ * @cred contains the credentials to use.
+ * @ns contains the user namespace we want the capability in.
+ * @cap contains the capability <include/linux/capability.h>.
+ * @audit contains whether to write an audit message or not.
+ * Return 0 if the capability is granted.
+ * @syslog:
+ * Check permission before accessing the kernel message ring or changing
+ * logging to the console.
+ * See the syslog(2) manual page for an explanation of the @type values.
+ * @type contains the type of action.
+ * @from_file indicates the context of action (if it came from /proc).
+ * Return 0 if permission is granted.
+ * @settime:
+ * Check permission to change the system time.
+ * struct timespec and timezone are defined in include/linux/time.h.
+ * @ts contains the new time.
+ * @tz contains the new timezone.
+ * Return 0 if permission is granted.
+ * @vm_enough_memory:
+ * Check permissions for allocating a new virtual mapping.
+ * @mm contains the mm struct it is being added to.
+ * @pages contains the number of pages.
+ * Return 0 if permission is granted.
+ *
+ * @ismaclabel:
+ * Check if the extended attribute specified by @name
+ * represents a MAC label. Returns 1 if @name is a MAC
+ * attribute, otherwise returns 0.
+ * @name full extended attribute name to check against
+ * LSM as a MAC label.
+ *
+ * @secid_to_secctx:
+ * Convert secid to security context. If secdata is NULL the length of
+ * the result will be returned in seclen, but no secdata will be returned.
+ * This does mean that the length could change between calls to check the
+ * length and the next call which actually allocates and returns the
+ * secdata.
+ * @secid contains the security ID.
+ * @secdata contains the pointer that stores the converted security
+ * context.
+ * @seclen pointer which contains the length of the data.
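+ * A caller typically pairs this with @release_secctx, e.g. (sketch):
+ *
+ *	security_secid_to_secctx(secid, &ctx, &ctxlen);
+ *	... use ctx ...
+ *	security_release_secctx(ctx, ctxlen);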
+ * @secctx_to_secid:
+ * Convert security context to secid.
+ * @secid contains the pointer to the generated security ID.
+ * @secdata contains the security context.
+ *
+ * @release_secctx:
+ * Release the security context.
+ * @secdata contains the security context.
+ * @seclen contains the length of the security context.
+ *
+ * Security hooks for Audit
+ *
+ * @audit_rule_init:
+ * Allocate and initialize an LSM audit rule structure.
+ * @field contains the required Audit action.
+ * Field flags are defined in include/linux/audit.h.
+ * @op contains the operator the rule uses.
+ * @rulestr contains the context where the rule will be applied to.
+ * @lsmrule contains a pointer to receive the result.
+ * Return 0 if @lsmrule has been successfully set,
+ * -EINVAL in case of an invalid rule.
+ *
+ * @audit_rule_known:
+ * Specifies whether given @rule contains any fields related to
+ * current LSM.
+ * @rule contains the audit rule of interest.
+ * Return 1 in case of relation found, 0 otherwise.
+ *
+ * @audit_rule_match:
+ * Determine if given @secid matches a rule previously approved
+ * by @audit_rule_known.
+ * @secid contains the security id in question.
+ * @field contains the field which relates to current LSM.
+ * @op contains the operator that will be used for matching.
+ * @rule points to the audit rule that will be checked against.
+ * @actx points to the audit context associated with the check.
+ * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
+ *
+ * @audit_rule_free:
+ * Deallocate the LSM audit rule structure previously allocated by
+ * audit_rule_init.
+ * @rule contains the allocated rule.
+ *
+ * @inode_notifysecctx:
+ * Notify the security module of what the security context of an inode
+ * should be. Initializes the incore security context managed by the
+ * security module for this inode. Example usage: NFS client invokes
+ * this hook to initialize the security context in its incore inode to the
+ * value provided by the server for the file when the server returned the
+ * file's attributes to the client.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_setsecctx:
+ * Change the security context of an inode. Updates the
+ * incore security context managed by the security module and invokes the
+ * fs code as needed (via __vfs_setxattr_noperm) to update any backing
+ * xattrs that represent the context. Example usage: NFS server invokes
+ * this hook to change the security context in its incore inode and on the
+ * backing filesystem to a value provided by the client on a SETATTR
+ * operation.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @dentry contains the inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_getsecctx:
+ * On success, returns 0 and fills out @ctx and @ctxlen with the security
+ * context for the given @inode.
+ *
+ * @inode we wish to get the security context of.
+ * @ctx is a pointer in which to place the allocated security context.
+ * @ctxlen points to the place to put the length of @ctx.
+ * This is the main security structure.
+ */
+
+union security_list_options {
+ int (*binder_set_context_mgr)(struct task_struct *mgr);
+ int (*binder_transaction)(struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_binder)(struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_file)(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file);
+
+ int (*ptrace_access_check)(struct task_struct *child,
+ unsigned int mode);
+ int (*ptrace_traceme)(struct task_struct *parent);
+ int (*capget)(struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
+ int (*capset)(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+ int (*capable)(const struct cred *cred, struct user_namespace *ns,
+ int cap, int audit);
+ int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
+ int (*quota_on)(struct dentry *dentry);
+ int (*syslog)(int type);
+ int (*settime)(const struct timespec *ts, const struct timezone *tz);
+ int (*vm_enough_memory)(struct mm_struct *mm, long pages);
+
+ int (*bprm_set_creds)(struct linux_binprm *bprm);
+ int (*bprm_check_security)(struct linux_binprm *bprm);
+ int (*bprm_secureexec)(struct linux_binprm *bprm);
+ void (*bprm_committing_creds)(struct linux_binprm *bprm);
+ void (*bprm_committed_creds)(struct linux_binprm *bprm);
+
+ int (*sb_alloc_security)(struct super_block *sb);
+ void (*sb_free_security)(struct super_block *sb);
+ int (*sb_copy_data)(char *orig, char *copy);
+ int (*sb_remount)(struct super_block *sb, void *data);
+ int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
+ int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
+ int (*sb_statfs)(struct dentry *dentry);
+ int (*sb_mount)(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data);
+ int (*sb_umount)(struct vfsmount *mnt, int flags);
+ int (*sb_pivotroot)(struct path *old_path, struct path *new_path);
+ int (*sb_set_mnt_opts)(struct super_block *sb,
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags);
+ int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
+ struct super_block *newsb);
+ int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
+ int (*dentry_init_security)(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen);
+
+#ifdef CONFIG_SECURITY_PATH
+ int (*path_unlink)(struct path *dir, struct dentry *dentry);
+ int (*path_mkdir)(struct path *dir, struct dentry *dentry,
+ umode_t mode);
+ int (*path_rmdir)(struct path *dir, struct dentry *dentry);
+ int (*path_mknod)(struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev);
+ int (*path_truncate)(struct path *path);
+ int (*path_symlink)(struct path *dir, struct dentry *dentry,
+ const char *old_name);
+ int (*path_link)(struct dentry *old_dentry, struct path *new_dir,
+ struct dentry *new_dentry);
+ int (*path_rename)(struct path *old_dir, struct dentry *old_dentry,
+ struct path *new_dir,
+ struct dentry *new_dentry);
+ int (*path_chmod)(struct path *path, umode_t mode);
+ int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid);
+ int (*path_chroot)(struct path *path);
+#endif
+
+ int (*inode_alloc_security)(struct inode *inode);
+ void (*inode_free_security)(struct inode *inode);
+ int (*inode_init_security)(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ const char **name, void **value,
+ size_t *len);
+ int (*inode_create)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+ int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry);
+ int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
+ int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
+ const char *old_name);
+ int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+ int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
+ int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev);
+ int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+ int (*inode_readlink)(struct dentry *dentry);
+ int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
+ bool rcu);
+ int (*inode_permission)(struct inode *inode, int mask);
+ int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
+ int (*inode_getattr)(const struct path *path);
+ int (*inode_setxattr)(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+ void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
+ const void *value, size_t size,
+ int flags);
+ int (*inode_getxattr)(struct dentry *dentry, const char *name);
+ int (*inode_listxattr)(struct dentry *dentry);
+ int (*inode_removexattr)(struct dentry *dentry, const char *name);
+ int (*inode_need_killpriv)(struct dentry *dentry);
+ int (*inode_killpriv)(struct dentry *dentry);
+ int (*inode_getsecurity)(const struct inode *inode, const char *name,
+ void **buffer, bool alloc);
+ int (*inode_setsecurity)(struct inode *inode, const char *name,
+ const void *value, size_t size,
+ int flags);
+ int (*inode_listsecurity)(struct inode *inode, char *buffer,
+ size_t buffer_size);
+ void (*inode_getsecid)(const struct inode *inode, u32 *secid);
+
+ int (*file_permission)(struct file *file, int mask);
+ int (*file_alloc_security)(struct file *file);
+ void (*file_free_security)(struct file *file);
+ int (*file_ioctl)(struct file *file, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap_addr)(unsigned long addr);
+ int (*mmap_file)(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags);
+ int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
+ unsigned long prot);
+ int (*file_lock)(struct file *file, unsigned int cmd);
+ int (*file_fcntl)(struct file *file, unsigned int cmd,
+ unsigned long arg);
+ void (*file_set_fowner)(struct file *file);
+ int (*file_send_sigiotask)(struct task_struct *tsk,
+ struct fown_struct *fown, int sig);
+ int (*file_receive)(struct file *file);
+ int (*file_open)(struct file *file, const struct cred *cred);
+
+ int (*task_create)(unsigned long clone_flags);
+ void (*task_free)(struct task_struct *task);
+ int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
+ void (*cred_free)(struct cred *cred);
+ int (*cred_prepare)(struct cred *new, const struct cred *old,
+ gfp_t gfp);
+ void (*cred_transfer)(struct cred *new, const struct cred *old);
+ int (*kernel_act_as)(struct cred *new, u32 secid);
+ int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
+ int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
+ int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_module_from_file)(struct file *file);
+ int (*task_fix_setuid)(struct cred *new, const struct cred *old,
+ int flags);
+ int (*task_setpgid)(struct task_struct *p, pid_t pgid);
+ int (*task_getpgid)(struct task_struct *p);
+ int (*task_getsid)(struct task_struct *p);
+ void (*task_getsecid)(struct task_struct *p, u32 *secid);
+ int (*task_setnice)(struct task_struct *p, int nice);
+ int (*task_setioprio)(struct task_struct *p, int ioprio);
+ int (*task_getioprio)(struct task_struct *p);
+ int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim);
+ int (*task_setscheduler)(struct task_struct *p);
+ int (*task_getscheduler)(struct task_struct *p);
+ int (*task_movememory)(struct task_struct *p);
+ int (*task_kill)(struct task_struct *p, struct siginfo *info,
+ int sig, u32 secid);
+ int (*task_wait)(struct task_struct *p);
+ int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+ void (*task_to_inode)(struct task_struct *p, struct inode *inode);
+
+ int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
+ void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
+
+ int (*msg_msg_alloc_security)(struct msg_msg *msg);
+ void (*msg_msg_free_security)(struct msg_msg *msg);
+
+ int (*msg_queue_alloc_security)(struct msg_queue *msq);
+ void (*msg_queue_free_security)(struct msg_queue *msq);
+ int (*msg_queue_associate)(struct msg_queue *msq, int msqflg);
+ int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd);
+ int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg,
+ int msqflg);
+ int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg,
+ struct task_struct *target, long type,
+ int mode);
+
+ int (*shm_alloc_security)(struct shmid_kernel *shp);
+ void (*shm_free_security)(struct shmid_kernel *shp);
+ int (*shm_associate)(struct shmid_kernel *shp, int shmflg);
+ int (*shm_shmctl)(struct shmid_kernel *shp, int cmd);
+ int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr,
+ int shmflg);
+
+ int (*sem_alloc_security)(struct sem_array *sma);
+ void (*sem_free_security)(struct sem_array *sma);
+ int (*sem_associate)(struct sem_array *sma, int semflg);
+ int (*sem_semctl)(struct sem_array *sma, int cmd);
+ int (*sem_semop)(struct sem_array *sma, struct sembuf *sops,
+ unsigned nsops, int alter);
+
+ int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
+
+ void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
+
+ int (*getprocattr)(struct task_struct *p, char *name, char **value);
+ int (*setprocattr)(struct task_struct *p, char *name, void *value,
+ size_t size);
+ int (*ismaclabel)(const char *name);
+ int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
+ int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
+ void (*release_secctx)(char *secdata, u32 seclen);
+
+ int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
+ int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
+ int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
+
+#ifdef CONFIG_SECURITY_NETWORK
+ int (*unix_stream_connect)(struct sock *sock, struct sock *other,
+ struct sock *newsk);
+ int (*unix_may_send)(struct socket *sock, struct socket *other);
+
+ int (*socket_create)(int family, int type, int protocol, int kern);
+ int (*socket_post_create)(struct socket *sock, int family, int type,
+ int protocol, int kern);
+ int (*socket_bind)(struct socket *sock, struct sockaddr *address,
+ int addrlen);
+ int (*socket_connect)(struct socket *sock, struct sockaddr *address,
+ int addrlen);
+ int (*socket_listen)(struct socket *sock, int backlog);
+ int (*socket_accept)(struct socket *sock, struct socket *newsock);
+ int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
+ int size);
+ int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
+ int size, int flags);
+ int (*socket_getsockname)(struct socket *sock);
+ int (*socket_getpeername)(struct socket *sock);
+ int (*socket_getsockopt)(struct socket *sock, int level, int optname);
+ int (*socket_setsockopt)(struct socket *sock, int level, int optname);
+ int (*socket_shutdown)(struct socket *sock, int how);
+ int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
+ int (*socket_getpeersec_stream)(struct socket *sock,
+ char __user *optval,
+ int __user *optlen, unsigned len);
+ int (*socket_getpeersec_dgram)(struct socket *sock,
+ struct sk_buff *skb, u32 *secid);
+ int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
+ void (*sk_free_security)(struct sock *sk);
+ void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
+ void (*sk_getsecid)(struct sock *sk, u32 *secid);
+ void (*sock_graft)(struct sock *sk, struct socket *parent);
+ int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req);
+ void (*inet_csk_clone)(struct sock *newsk,
+ const struct request_sock *req);
+ void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
+ int (*secmark_relabel_packet)(u32 secid);
+ void (*secmark_refcount_inc)(void);
+ void (*secmark_refcount_dec)(void);
+ void (*req_classify_flow)(const struct request_sock *req,
+ struct flowi *fl);
+ int (*tun_dev_alloc_security)(void **security);
+ void (*tun_dev_free_security)(void *security);
+ int (*tun_dev_create)(void);
+ int (*tun_dev_attach_queue)(void *security);
+ int (*tun_dev_attach)(struct sock *sk, void *security);
+ int (*tun_dev_open)(void *security);
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp);
+ int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctx);
+ void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
+ int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
+ int (*xfrm_state_alloc)(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx);
+ int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid);
+ void (*xfrm_state_free_security)(struct xfrm_state *x);
+ int (*xfrm_state_delete_security)(struct xfrm_state *x);
+ int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
+ u8 dir);
+ int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
+ int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+ /* key management security hooks */
+#ifdef CONFIG_KEYS
+ int (*key_alloc)(struct key *key, const struct cred *cred,
+ unsigned long flags);
+ void (*key_free)(struct key *key);
+ int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
+ unsigned perm);
+ int (*key_getsecurity)(struct key *key, char **_buffer);
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+ int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
+ void **lsmrule);
+ int (*audit_rule_known)(struct audit_krule *krule);
+ int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
+ struct audit_context *actx);
+ void (*audit_rule_free)(void *lsmrule);
+#endif /* CONFIG_AUDIT */
+};
+
+struct security_hook_heads {
+ struct list_head binder_set_context_mgr;
+ struct list_head binder_transaction;
+ struct list_head binder_transfer_binder;
+ struct list_head binder_transfer_file;
+ struct list_head ptrace_access_check;
+ struct list_head ptrace_traceme;
+ struct list_head capget;
+ struct list_head capset;
+ struct list_head capable;
+ struct list_head quotactl;
+ struct list_head quota_on;
+ struct list_head syslog;
+ struct list_head settime;
+ struct list_head vm_enough_memory;
+ struct list_head bprm_set_creds;
+ struct list_head bprm_check_security;
+ struct list_head bprm_secureexec;
+ struct list_head bprm_committing_creds;
+ struct list_head bprm_committed_creds;
+ struct list_head sb_alloc_security;
+ struct list_head sb_free_security;
+ struct list_head sb_copy_data;
+ struct list_head sb_remount;
+ struct list_head sb_kern_mount;
+ struct list_head sb_show_options;
+ struct list_head sb_statfs;
+ struct list_head sb_mount;
+ struct list_head sb_umount;
+ struct list_head sb_pivotroot;
+ struct list_head sb_set_mnt_opts;
+ struct list_head sb_clone_mnt_opts;
+ struct list_head sb_parse_opts_str;
+ struct list_head dentry_init_security;
+#ifdef CONFIG_SECURITY_PATH
+ struct list_head path_unlink;
+ struct list_head path_mkdir;
+ struct list_head path_rmdir;
+ struct list_head path_mknod;
+ struct list_head path_truncate;
+ struct list_head path_symlink;
+ struct list_head path_link;
+ struct list_head path_rename;
+ struct list_head path_chmod;
+ struct list_head path_chown;
+ struct list_head path_chroot;
+#endif
+ struct list_head inode_alloc_security;
+ struct list_head inode_free_security;
+ struct list_head inode_init_security;
+ struct list_head inode_create;
+ struct list_head inode_link;
+ struct list_head inode_unlink;
+ struct list_head inode_symlink;
+ struct list_head inode_mkdir;
+ struct list_head inode_rmdir;
+ struct list_head inode_mknod;
+ struct list_head inode_rename;
+ struct list_head inode_readlink;
+ struct list_head inode_follow_link;
+ struct list_head inode_permission;
+ struct list_head inode_setattr;
+ struct list_head inode_getattr;
+ struct list_head inode_setxattr;
+ struct list_head inode_post_setxattr;
+ struct list_head inode_getxattr;
+ struct list_head inode_listxattr;
+ struct list_head inode_removexattr;
+ struct list_head inode_need_killpriv;
+ struct list_head inode_killpriv;
+ struct list_head inode_getsecurity;
+ struct list_head inode_setsecurity;
+ struct list_head inode_listsecurity;
+ struct list_head inode_getsecid;
+ struct list_head file_permission;
+ struct list_head file_alloc_security;
+ struct list_head file_free_security;
+ struct list_head file_ioctl;
+ struct list_head mmap_addr;
+ struct list_head mmap_file;
+ struct list_head file_mprotect;
+ struct list_head file_lock;
+ struct list_head file_fcntl;
+ struct list_head file_set_fowner;
+ struct list_head file_send_sigiotask;
+ struct list_head file_receive;
+ struct list_head file_open;
+ struct list_head task_create;
+ struct list_head task_free;
+ struct list_head cred_alloc_blank;
+ struct list_head cred_free;
+ struct list_head cred_prepare;
+ struct list_head cred_transfer;
+ struct list_head kernel_act_as;
+ struct list_head kernel_create_files_as;
+ struct list_head kernel_fw_from_file;
+ struct list_head kernel_module_request;
+ struct list_head kernel_module_from_file;
+ struct list_head task_fix_setuid;
+ struct list_head task_setpgid;
+ struct list_head task_getpgid;
+ struct list_head task_getsid;
+ struct list_head task_getsecid;
+ struct list_head task_setnice;
+ struct list_head task_setioprio;
+ struct list_head task_getioprio;
+ struct list_head task_setrlimit;
+ struct list_head task_setscheduler;
+ struct list_head task_getscheduler;
+ struct list_head task_movememory;
+ struct list_head task_kill;
+ struct list_head task_wait;
+ struct list_head task_prctl;
+ struct list_head task_to_inode;
+ struct list_head ipc_permission;
+ struct list_head ipc_getsecid;
+ struct list_head msg_msg_alloc_security;
+ struct list_head msg_msg_free_security;
+ struct list_head msg_queue_alloc_security;
+ struct list_head msg_queue_free_security;
+ struct list_head msg_queue_associate;
+ struct list_head msg_queue_msgctl;
+ struct list_head msg_queue_msgsnd;
+ struct list_head msg_queue_msgrcv;
+ struct list_head shm_alloc_security;
+ struct list_head shm_free_security;
+ struct list_head shm_associate;
+ struct list_head shm_shmctl;
+ struct list_head shm_shmat;
+ struct list_head sem_alloc_security;
+ struct list_head sem_free_security;
+ struct list_head sem_associate;
+ struct list_head sem_semctl;
+ struct list_head sem_semop;
+ struct list_head netlink_send;
+ struct list_head d_instantiate;
+ struct list_head getprocattr;
+ struct list_head setprocattr;
+ struct list_head ismaclabel;
+ struct list_head secid_to_secctx;
+ struct list_head secctx_to_secid;
+ struct list_head release_secctx;
+ struct list_head inode_notifysecctx;
+ struct list_head inode_setsecctx;
+ struct list_head inode_getsecctx;
+#ifdef CONFIG_SECURITY_NETWORK
+ struct list_head unix_stream_connect;
+ struct list_head unix_may_send;
+ struct list_head socket_create;
+ struct list_head socket_post_create;
+ struct list_head socket_bind;
+ struct list_head socket_connect;
+ struct list_head socket_listen;
+ struct list_head socket_accept;
+ struct list_head socket_sendmsg;
+ struct list_head socket_recvmsg;
+ struct list_head socket_getsockname;
+ struct list_head socket_getpeername;
+ struct list_head socket_getsockopt;
+ struct list_head socket_setsockopt;
+ struct list_head socket_shutdown;
+ struct list_head socket_sock_rcv_skb;
+ struct list_head socket_getpeersec_stream;
+ struct list_head socket_getpeersec_dgram;
+ struct list_head sk_alloc_security;
+ struct list_head sk_free_security;
+ struct list_head sk_clone_security;
+ struct list_head sk_getsecid;
+ struct list_head sock_graft;
+ struct list_head inet_conn_request;
+ struct list_head inet_csk_clone;
+ struct list_head inet_conn_established;
+ struct list_head secmark_relabel_packet;
+ struct list_head secmark_refcount_inc;
+ struct list_head secmark_refcount_dec;
+ struct list_head req_classify_flow;
+ struct list_head tun_dev_alloc_security;
+ struct list_head tun_dev_free_security;
+ struct list_head tun_dev_create;
+ struct list_head tun_dev_attach_queue;
+ struct list_head tun_dev_attach;
+ struct list_head tun_dev_open;
+ struct list_head skb_owned_by;
+#endif /* CONFIG_SECURITY_NETWORK */
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ struct list_head xfrm_policy_alloc_security;
+ struct list_head xfrm_policy_clone_security;
+ struct list_head xfrm_policy_free_security;
+ struct list_head xfrm_policy_delete_security;
+ struct list_head xfrm_state_alloc;
+ struct list_head xfrm_state_alloc_acquire;
+ struct list_head xfrm_state_free_security;
+ struct list_head xfrm_state_delete_security;
+ struct list_head xfrm_policy_lookup;
+ struct list_head xfrm_state_pol_flow_match;
+ struct list_head xfrm_decode_session;
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+#ifdef CONFIG_KEYS
+ struct list_head key_alloc;
+ struct list_head key_free;
+ struct list_head key_permission;
+ struct list_head key_getsecurity;
+#endif /* CONFIG_KEYS */
+#ifdef CONFIG_AUDIT
+ struct list_head audit_rule_init;
+ struct list_head audit_rule_known;
+ struct list_head audit_rule_match;
+ struct list_head audit_rule_free;
+#endif /* CONFIG_AUDIT */
+};
+
+/*
+ * Security module hook list structure.
+ * For use with generic list macros for common operations.
+ */
+struct security_hook_list {
+ struct list_head list;
+ struct list_head *head;
+ union security_list_options hook;
+};
+
+/*
+ * Initializing a security_hook_list structure takes
+ * up a lot of space in a source file. This macro takes
+ * care of the common case and reduces the amount of
+ * text involved.
+ */
+#define LSM_HOOK_INIT(HEAD, HOOK) \
+ { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
+
+extern struct security_hook_heads security_hook_heads;
+
+static inline void security_add_hooks(struct security_hook_list *hooks,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ list_add_tail_rcu(&hooks[i].list, hooks[i].head);
+}
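+
+/*
+ * Example registration (an illustrative sketch; the "example" names are
+ * hypothetical and not part of this interface):
+ *
+ *	static int example_file_open(struct file *file,
+ *				     const struct cred *cred)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct security_hook_list example_hooks[] = {
+ *		LSM_HOOK_INIT(file_open, example_file_open),
+ *	};
+ *
+ * and, from the module's init code:
+ *
+ *	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
+ */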
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+/*
+ * Assuring the safety of deleting a security module is up to
+ * the security module involved. This may entail ordering the
+ * module's hook list in a particular way, refusing to disable
+ * the module once a policy is loaded or any number of other
+ * actions better imagined than described.
+ *
+ * The name of the configuration option reflects the only module
+ * that currently uses the mechanism. Any developer who thinks
+ * disabling their module is a good idea needs to be at least as
+ * careful as the SELinux team.
+ */
+static inline void security_delete_hooks(struct security_hook_list *hooks,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ list_del_rcu(&hooks[i].list);
+}
+#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+
+extern int __init security_module_enable(const char *module);
+extern void __init capability_add_hooks(void);
+#ifdef CONFIG_SECURITY_YAMA
+extern void __init yama_add_hooks(void);
+#else
+static inline void __init yama_add_hooks(void) { }
+#endif
+
+#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/kernel/include/linux/mailbox_client.h b/kernel/include/linux/mailbox_client.h
index 1726ccbd8..443487109 100644
--- a/kernel/include/linux/mailbox_client.h
+++ b/kernel/include/linux/mailbox_client.h
@@ -40,6 +40,8 @@ struct mbox_client {
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
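+/*
+ * Illustrative lookup by name (a sketch; assumes the client's device
+ * node has an "mbox-names" property containing "tx"):
+ *
+ *	struct mbox_chan *chan = mbox_request_channel_byname(&client, "tx");
+ *	if (!IS_ERR(chan))
+ *		mbox_send_message(chan, &msg);
+ */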
+struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
int mbox_send_message(struct mbox_chan *chan, void *mssg);
void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
diff --git a/kernel/include/linux/mailbox_controller.h b/kernel/include/linux/mailbox_controller.h
index d4cf96f07..74deadb42 100644
--- a/kernel/include/linux/mailbox_controller.h
+++ b/kernel/include/linux/mailbox_controller.h
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/types.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/completion.h>
@@ -67,12 +67,13 @@ struct mbox_chan_ops {
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for
* last TX's status after these many millisecs
* @of_xlate: Controller driver specific mapping of channel via DT
- * @poll: API private. Used to poll for TXDONE on all channels.
+ * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
+ * channels.
* @node: API private. To hook into list of controllers.
*/
struct mbox_controller {
struct device *dev;
- struct mbox_chan_ops *ops;
+ const struct mbox_chan_ops *ops;
struct mbox_chan *chans;
int num_chans;
bool txdone_irq;
@@ -81,7 +82,7 @@ struct mbox_controller {
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
/* Internal to API */
- struct timer_list poll;
+ struct hrtimer poll_hrt;
struct list_head node;
};
diff --git a/kernel/include/linux/marvell_phy.h b/kernel/include/linux/marvell_phy.h
index e6982ac32..a57f0dfb6 100644
--- a/kernel/include/linux/marvell_phy.h
+++ b/kernel/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
#define MARVELL_PHY_ID_88E1318S 0x01410e90
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
+#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E3016 0x01410e60
/* struct phy_device dev_flags definitions */
diff --git a/kernel/include/linux/math64.h b/kernel/include/linux/math64.h
index c45c089bf..6e8b5b270 100644
--- a/kernel/include/linux/math64.h
+++ b/kernel/include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
}
#endif /* mul_u64_u32_shr */
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
#else
#ifndef mul_u64_u32_shr
@@ -161,6 +168,79 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
}
#endif /* mul_u64_u32_shr */
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+ union {
+ u64 ll;
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 high, low;
+#else
+ u32 low, high;
+#endif
+ } l;
+ } rl, rm, rn, rh, a0, b0;
+ u64 c;
+
+ a0.ll = a;
+ b0.ll = b;
+
+ rl.ll = (u64)a0.l.low * b0.l.low;
+ rm.ll = (u64)a0.l.low * b0.l.high;
+ rn.ll = (u64)a0.l.high * b0.l.low;
+ rh.ll = (u64)a0.l.high * b0.l.high;
+
+ /*
+ * Each of these lines computes a 64-bit intermediate result into "c",
+ * starting at bits 32-95. The low 32-bits go into the result of the
+ * multiplication, the high 32-bits are carried into the next step.
+ */
+ rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+ rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+ rh.l.high = (c >> 32) + rh.l.high;
+
+ /*
+ * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+ * shift it right and throw away the high part of the result.
+ */
+ if (shift == 0)
+ return rl.ll;
+ if (shift < 64)
+ return (rl.ll >> shift) | (rh.ll << (64 - shift));
+ return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
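+
+/*
+ * For instance (illustrative): mul_u64_u64_shr(a, b, 64) yields the high
+ * 64 bits of the 128-bit product a * b, while a shift of 0 returns the
+ * low 64 bits.
+ */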
+
#endif
+#ifndef mul_u64_u32_div
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+{
+ union {
+ u64 ll;
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 high, low;
+#else
+ u32 low, high;
+#endif
+ } l;
+ } u, rl, rh;
+
+ u.ll = a;
+ rl.ll = (u64)u.l.low * mul;
+ rh.ll = (u64)u.l.high * mul + rl.l.high;
+
+ /* Bits 32-63 of the result will be in rh.l.low. */
+ rl.l.high = do_div(rh.ll, divisor);
+
+ /* Bits 0-31 of the result will be in rl.l.low. */
+ do_div(rl.ll, divisor);
+
+ rl.l.high = rh.l.low;
+ return rl.ll;
+}
+#endif /* mul_u64_u32_div */
+
#endif /* _LINUX_MATH64_H */
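
Editor's note (usage sketch, not part of the patch): the new helpers replace
open-coded 128-bit arithmetic at call sites. The callers below are
hypothetical; a shift of 64 extracts the integer part of a 64.64 fixed-point
product.

	#include <linux/math64.h>

	/* Scale a cycle count by a 64.64 fixed-point ratio without overflow. */
	static u64 scale_cycles(u64 cycles, u64 ratio_frac64)
	{
		return mul_u64_u64_shr(cycles, ratio_frac64, 64);
	}

	/* (ticks * to_hz) / from_hz with a full 96-bit intermediate product. */
	static u64 convert_ticks(u64 ticks, u32 to_hz, u32 from_hz)
	{
		return mul_u64_u32_div(ticks, to_hz, from_hz);
	}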
diff --git a/kernel/include/linux/mbus.h b/kernel/include/linux/mbus.h
index 611b69fa8..1f7bc630d 100644
--- a/kernel/include/linux/mbus.h
+++ b/kernel/include/linux/mbus.h
@@ -54,11 +54,16 @@ struct mbus_dram_target_info
*/
#ifdef CONFIG_PLAT_ORION
extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
+extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
#else
static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
return NULL;
}
+static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+ return NULL;
+}
#endif
int mvebu_mbus_save_cpu_target(u32 *store_addr);
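
Editor's note (usage sketch, not part of the patch): the inline stub keeps
callers ifdef-free; on non-Orion platforms the no-overlap variant simply
reports no DRAM info. The window-programming step below is driver specific
and hypothetical.

	#include <linux/errno.h>
	#include <linux/mbus.h>

	static int demo_setup_dram_windows(void)
	{
		const struct mbus_dram_target_info *dram;
		int i;

		dram = mv_mbus_dram_info_nooverlap();
		if (!dram)
			return -ENODEV;	/* not a PLAT_ORION platform */

		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;
			/* ... program one decode window from cs->base/cs->size ... */
		}
		return 0;
	}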
diff --git a/kernel/include/linux/mei_cl_bus.h b/kernel/include/linux/mei_cl_bus.h
index 0819d36a3..e74691953 100644
--- a/kernel/include/linux/mei_cl_bus.h
+++ b/kernel/include/linux/mei_cl_bus.h
@@ -6,6 +6,55 @@
#include <linux/mod_devicetable.h>
struct mei_cl_device;
+struct mei_device;
+
+typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
+ u32 events, void *context);
+
+/**
+ * struct mei_cl_device - MEI device handle
+ * An mei_cl_device pointer is returned from mei_add_device()
+ * and links MEI bus clients to their actual ME host client pointer.
+ * Drivers for MEI devices will get an mei_cl_device pointer
+ * when being probed and shall use it for doing ME bus I/O.
+ *
+ * @bus_list: device on the bus list
+ * @bus: parent mei device
+ * @dev: linux driver model device pointer
+ * @me_cl: me client
+ * @cl: mei client
+ * @name: device name
+ * @event_work: async work to execute event callback
+ * @event_cb: Drivers register this callback to get notifications of
+ * asynchronous ME events (e.g. an Rx buffer is pending).
+ * @event_context: event callback run context
+ * @events_mask: Events bit mask requested by driver.
+ * @events: Events bitmask sent to the driver.
+ *
+ * @do_match: whether the device can be matched with a driver
+ * @is_added: device is already scanned
+ * @priv_data: client private data
+ */
+struct mei_cl_device {
+ struct list_head bus_list;
+ struct mei_device *bus;
+ struct device dev;
+
+ struct mei_me_client *me_cl;
+ struct mei_cl *cl;
+ char name[MEI_CL_NAME_SIZE];
+
+ struct work_struct event_work;
+ mei_cldev_event_cb_t event_cb;
+ void *event_context;
+ unsigned long events_mask;
+ unsigned long events;
+
+ unsigned int do_match:1;
+ unsigned int is_added:1;
+
+ void *priv_data;
+};
struct mei_cl_driver {
struct device_driver driver;
@@ -13,33 +62,37 @@ struct mei_cl_driver {
const struct mei_cl_device_id *id_table;
- int (*probe)(struct mei_cl_device *dev,
+ int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
- int (*remove)(struct mei_cl_device *dev);
+ int (*remove)(struct mei_cl_device *cldev);
};
-int __mei_cl_driver_register(struct mei_cl_driver *driver,
+int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
struct module *owner);
-#define mei_cl_driver_register(driver) \
- __mei_cl_driver_register(driver, THIS_MODULE)
+#define mei_cldev_driver_register(cldrv) \
+ __mei_cldev_driver_register(cldrv, THIS_MODULE)
-void mei_cl_driver_unregister(struct mei_cl_driver *driver);
+void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
-ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
-ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
-typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
- u32 events, void *context);
-int mei_cl_register_event_cb(struct mei_cl_device *device,
- mei_cl_event_cb_t read_cb, void *context);
+int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
+ unsigned long event_mask,
+ mei_cldev_event_cb_t read_cb, void *context);
#define MEI_CL_EVENT_RX 0
#define MEI_CL_EVENT_TX 1
+#define MEI_CL_EVENT_NOTIF 2
+
+const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
+u8 mei_cldev_ver(const struct mei_cl_device *cldev);
-void *mei_cl_get_drvdata(const struct mei_cl_device *device);
-void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
+void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
+void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
-int mei_cl_enable_device(struct mei_cl_device *device);
-int mei_cl_disable_device(struct mei_cl_device *device);
+int mei_cldev_enable(struct mei_cl_device *cldev);
+int mei_cldev_disable(struct mei_cl_device *cldev);
+bool mei_cldev_enabled(struct mei_cl_device *cldev);
#endif /* _LINUX_MEI_CL_BUS_H */
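
Editor's note (usage sketch, not part of the patch): under the renamed API a
bus driver probes against a cldev, enables it, and does I/O through the
mei_cldev_* helpers. The driver name, buffer handling, and event mask choice
below are illustrative only.

	#include <linux/bitops.h>
	#include <linux/mei_cl_bus.h>

	static void demo_event_cb(struct mei_cl_device *cldev, u32 events,
				  void *context)
	{
		u8 buf[64];

		if (events & BIT(MEI_CL_EVENT_RX))
			mei_cldev_recv(cldev, buf, sizeof(buf));
	}

	static int demo_probe(struct mei_cl_device *cldev,
			      const struct mei_cl_device_id *id)
	{
		int ret;

		ret = mei_cldev_enable(cldev);
		if (ret)
			return ret;

		return mei_cldev_register_event_cb(cldev, BIT(MEI_CL_EVENT_RX),
						   demo_event_cb, NULL);
	}

	static int demo_remove(struct mei_cl_device *cldev)
	{
		return mei_cldev_disable(cldev);
	}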
diff --git a/kernel/include/linux/memblock.h b/kernel/include/linux/memblock.h
index 9497ec7c7..24daf8fc4 100644
--- a/kernel/include/linux/memblock.h
+++ b/kernel/include/linux/memblock.h
@@ -21,7 +21,11 @@
#define INIT_PHYSMEM_REGIONS 4
/* Definition of memblock flags. */
-#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */
+enum {
+ MEMBLOCK_NONE = 0x0, /* No special request */
+ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
+ MEMBLOCK_MIRROR = 0x2, /* mirrored region */
+};
struct memblock_region {
phys_addr_t base;
@@ -61,7 +65,7 @@ extern bool movable_node_enabled;
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- int nid);
+ int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -73,26 +77,31 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
+bool memblock_overlaps_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+ulong choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
int nid, unsigned long flags);
-int memblock_remove_range(struct memblock_type *type,
- phys_addr_t base,
- phys_addr_t size);
-
-void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range(u64 *idx, int nid, ulong flags,
+ struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
+void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+ phys_addr_t *out_end);
+
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
@@ -100,16 +109,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
* @type_a: ptr to memblock_type to iterate
* @type_b: ptr to memblock_type which excludes from the iteration
* @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, \
+#define for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
- for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \
+ for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range(&i, nid, type_a, type_b, \
+ __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
@@ -119,19 +129,35 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
* @type_a: ptr to memblock_type to iterate
* @type_b: ptr to memblock_type which excludes from the iteration
* @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, \
+#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, type_a, type_b, \
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range_rev(&i, nid, type_a, type_b, \
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
+/**
+ * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over reserved areas of memblock. Available as soon as memblock
+ * is initialized.
+ */
+#define for_each_reserved_mem_region(i, p_start, p_end) \
+ for (i = 0UL, \
+ __next_reserved_mem_region(&i, p_start, p_end); \
+ i != (u64)ULLONG_MAX; \
+ __next_reserved_mem_region(&i, p_start, p_end))
+
#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
@@ -153,6 +179,11 @@ static inline bool movable_node_is_enabled(void)
}
#endif
+static inline bool memblock_is_mirror(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_MIRROR;
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
@@ -181,13 +212,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
*
* Walks over free (memory && !reserved) areas of memblock. Available as
* soon as memblock is initialized.
*/
-#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
+#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, p_start, p_end, p_nid)
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -196,13 +228,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
*
* Walks over free (memory && !reserved) areas of memblock in reverse
* order. Available as soon as memblock is initialized.
*/
-#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
+#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
+ p_nid) \
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, p_start, p_end, p_nid)
+ nid, flags, p_start, p_end, p_nid)
static inline void memblock_set_region_flags(struct memblock_region *r,
unsigned long flags)
@@ -273,7 +307,8 @@ static inline bool memblock_bottom_up(void) { return false; }
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end);
+ phys_addr_t start, phys_addr_t end,
+ ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -286,7 +321,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
-int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
extern void __memblock_dump_all(void);
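
Editor's note (usage sketch, not part of the patch): with the flags argument
threaded through the iterators, allocation walks can be restricted by memory
attribute. The function below is a hypothetical pattern for preferring
mirrored memory.

	#include <linux/memblock.h>

	static phys_addr_t __init demo_find_mirrored(phys_addr_t size,
						     phys_addr_t align)
	{
		phys_addr_t start, end;
		u64 i;

		/* Walk only free ranges backed by mirrored memory. */
		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_MIRROR,
					&start, &end, NULL) {
			phys_addr_t cand = ALIGN(start, align);

			if (cand < end && end - cand >= size)
				return cand;
		}
		/* Callers fall back to choose_memblock_flags() defaults. */
		return 0;
	}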
diff --git a/kernel/include/linux/memcontrol.h b/kernel/include/linux/memcontrol.h
index 6c8918114..cd0e2413c 100644
--- a/kernel/include/linux/memcontrol.h
+++ b/kernel/include/linux/memcontrol.h
@@ -23,6 +23,11 @@
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
+#include <linux/page_counter.h>
+#include <linux/vmpressure.h>
+#include <linux/eventfd.h>
+#include <linux/mmzone.h>
+#include <linux/writeback.h>
struct mem_cgroup;
struct page;
@@ -41,6 +46,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
@@ -66,10 +72,224 @@ enum mem_cgroup_events_index {
MEMCG_NR_EVENTS,
};
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used to
+ * trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg events.
+ */
+enum mem_cgroup_events_target {
+ MEM_CGROUP_TARGET_THRESH,
+ MEM_CGROUP_TARGET_SOFTLIMIT,
+ MEM_CGROUP_TARGET_NUMAINFO,
+ MEM_CGROUP_NTARGETS,
+};
+
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
+struct cg_proto {
+ struct page_counter memory_allocated; /* Current allocated memory. */
+ struct percpu_counter sockets_allocated; /* Current number of sockets. */
+ int memory_pressure;
+ long sysctl_mem[3];
+ unsigned long flags;
+ /*
+ * The memcg field is used to find the memcg we belong to directly.
+ * Each memcg struct can hold more than one cg_proto, so container_of
+ * won't really cut it.
+ *
+ * The elegant solution would be having an inverse function to
+ * proto_cgroup in struct proto, but that means polluting the structure
+ * for everybody, instead of just for memcg users.
+ */
+ struct mem_cgroup *memcg;
+};
+
#ifdef CONFIG_MEMCG
-void mem_cgroup_events(struct mem_cgroup *memcg,
+struct mem_cgroup_stat_cpu {
+ long count[MEM_CGROUP_STAT_NSTATS];
+ unsigned long events[MEMCG_NR_EVENTS];
+ unsigned long nr_page_events;
+ unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct mem_cgroup_reclaim_iter {
+ struct mem_cgroup *position;
+ /* scan generation, increased every round-trip */
+ unsigned int generation;
+};
+
+/*
+ * per-zone information in memory controller.
+ */
+struct mem_cgroup_per_zone {
+ struct lruvec lruvec;
+ unsigned long lru_size[NR_LRU_LISTS];
+
+ struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+
+ struct rb_node tree_node; /* RB tree node */
+ unsigned long usage_in_excess; /* Set to the value by which */
+ /* the soft limit is exceeded */
+ bool on_tree;
+ struct mem_cgroup *memcg; /* Back pointer, we cannot */
+ /* use container_of */
+};
+
+struct mem_cgroup_per_node {
+ struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_threshold {
+ struct eventfd_ctx *eventfd;
+ unsigned long threshold;
+};
+
+/* For threshold */
+struct mem_cgroup_threshold_ary {
+ /* An array index points to threshold just below or equal to usage. */
+ int current_threshold;
+ /* Size of entries[] */
+ unsigned int size;
+ /* Array of thresholds */
+ struct mem_cgroup_threshold entries[0];
+};
+
+struct mem_cgroup_thresholds {
+ /* Primary thresholds array */
+ struct mem_cgroup_threshold_ary *primary;
+ /*
+ * Spare threshold array.
+ * This is needed to make mem_cgroup_unregister_event() "never fail".
+ * It must be able to store at least primary->size - 1 entries.
+ */
+ struct mem_cgroup_threshold_ary *spare;
+};
+
+/*
+ * The memory controller data structure. The memory controller controls both
+ * page cache and RSS per cgroup. We would eventually like to provide
+ * statistics based on the statistics developed by Rik Van Riel for clock-pro,
+ * to help the administrator determine what knobs to tune.
+ */
+struct mem_cgroup {
+ struct cgroup_subsys_state css;
+
+ /* Accounted resources */
+ struct page_counter memory;
+ struct page_counter memsw;
+ struct page_counter kmem;
+
+ /* Normal memory consumption range */
+ unsigned long low;
+ unsigned long high;
+
+ unsigned long soft_limit;
+
+ /* vmpressure notifications */
+ struct vmpressure vmpressure;
+
+ /* css_online() has been completed */
+ int initialized;
+
+ /*
+ * Should the accounting and control be hierarchical, per subtree?
+ */
+ bool use_hierarchy;
+
+ /* protected by memcg_oom_lock */
+ bool oom_lock;
+ int under_oom;
+
+ int swappiness;
+ /* OOM-Killer disable */
+ int oom_kill_disable;
+
+ /* handle for "memory.events" */
+ struct cgroup_file events_file;
+
+ /* protect arrays of thresholds */
+ struct mutex thresholds_lock;
+
+ /* thresholds for memory usage. RCU-protected */
+ struct mem_cgroup_thresholds thresholds;
+
+ /* thresholds for mem+swap usage. RCU-protected */
+ struct mem_cgroup_thresholds memsw_thresholds;
+
+ /* For oom notifier event fd */
+ struct list_head oom_notify;
+
+ /*
+ * Should we move charges of a task when a task is moved into this
+ * mem_cgroup ? And what type of charges should we move ?
+ */
+ unsigned long move_charge_at_immigrate;
+ /*
+ * set > 0 if pages under this cgroup are moving to other cgroup.
+ */
+ atomic_t moving_account;
+ /* taken only while moving_account > 0 */
+ spinlock_t move_lock;
+ struct task_struct *move_lock_task;
+ unsigned long move_lock_flags;
+ /*
+ * percpu counter.
+ */
+ struct mem_cgroup_stat_cpu __percpu *stat;
+
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+ struct cg_proto tcp_mem;
+#endif
+#if defined(CONFIG_MEMCG_KMEM)
+ /* Index in the kmem_cache->memcg_params.memcg_caches array */
+ int kmemcg_id;
+ bool kmem_acct_activated;
+ bool kmem_acct_active;
+#endif
+
+ int last_scanned_node;
+#if MAX_NUMNODES > 1
+ nodemask_t scan_nodes;
+ atomic_t numainfo_events;
+ atomic_t numainfo_updating;
+#endif
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+ struct wb_domain cgwb_domain;
+#endif
+
+ /* List of events which userspace want to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+
+ struct mem_cgroup_per_node *nodeinfo[0];
+ /* WARNING: nodeinfo must be the last member here */
+};
+extern struct cgroup_subsys_state *mem_cgroup_root_css;
+
+/**
+ * mem_cgroup_events - count memory events against a cgroup
+ * @memcg: the memory cgroup
+ * @idx: the event index
+ * @nr: the number of events to account for
+ */
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
- unsigned int nr);
+ unsigned int nr)
+{
+ this_cpu_add(memcg->stat->events[idx], nr);
+ cgroup_file_notify(&memcg->events_file);
+}
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
@@ -81,21 +301,34 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
- bool lrucare);
+void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
-bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
- struct mem_cgroup *root);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
-extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct mem_cgroup, css) : NULL;
+}
-extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
+static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+ struct mem_cgroup *root)
+{
+ if (root == memcg)
+ return true;
+ if (!root->use_hierarchy)
+ return false;
+ return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
+}
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
@@ -111,39 +344,84 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
+struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+ino_t page_cgroup_ino(struct page *page);
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
-void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+static inline bool mem_cgroup_disabled(void)
+{
+ return !cgroup_subsys_enabled(memory_cgrp_subsys);
+}
/*
* For memory reclaim.
*/
-int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
-bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
-extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
- struct task_struct *p);
+
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int nr_pages);
+
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return true;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ memcg = mz->memcg;
+
+ return !!(memcg->css.flags & CSS_ONLINE);
+}
+
+static inline
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ return mz->lru_size[lru];
+}
+
+static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+{
+ unsigned long inactive_ratio;
+ unsigned long inactive;
+ unsigned long active;
+ unsigned long gb;
+
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
+
+ gb = (inactive + active) >> (30 - PAGE_SHIFT);
+ if (gb)
+ inactive_ratio = int_sqrt(10 * gb);
+ else
+ inactive_ratio = 1;
+
+ return inactive * inactive_ratio < active;
+}
+
+void mem_cgroup_handle_over_high(void);
+
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ struct task_struct *p);
static inline void mem_cgroup_oom_enable(void)
{
- WARN_ON(current->memcg_oom.may_oom);
- current->memcg_oom.may_oom = 1;
+ WARN_ON(current->memcg_may_oom);
+ current->memcg_may_oom = 1;
}
static inline void mem_cgroup_oom_disable(void)
{
- WARN_ON(!current->memcg_oom.may_oom);
- current->memcg_oom.may_oom = 0;
+ WARN_ON(!current->memcg_may_oom);
+ current->memcg_may_oom = 0;
}
static inline bool task_in_memcg_oom(struct task_struct *p)
{
- return p->memcg_oom.memcg;
+ return p->memcg_in_oom;
}
bool mem_cgroup_oom_synchronize(bool wait);
@@ -152,18 +430,26 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-static inline bool mem_cgroup_disabled(void)
-{
- if (memory_cgrp_subsys.disabled)
- return true;
- return false;
-}
-
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx, int val);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+/**
+ * mem_cgroup_update_page_stat - update page state statistics
+ * @memcg: memcg to account against
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ *
+ * See mem_cgroup_begin_page_stat() for locking requirements.
+ */
+static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx, int val)
+{
+ VM_BUG_ON(!rcu_read_lock_held());
+
+ if (memcg)
+ this_cpu_add(memcg->stat->count[idx], val);
+}
+
static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
@@ -180,13 +466,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
enum vm_event_item idx)
{
+ struct mem_cgroup *memcg;
+
if (mem_cgroup_disabled())
return;
- __mem_cgroup_count_vm_event(mm, idx);
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
+ goto out;
+
+ switch (idx) {
+ case PGFAULT:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
+ break;
+ case PGMAJFAULT:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+ break;
+ default:
+ BUG();
+ }
+out:
+ rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
@@ -234,9 +538,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_migrate(struct page *oldpage,
- struct page *newpage,
- bool lrucare)
+static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
{
}
@@ -252,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
return &zone->lruvec;
}
-static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
-{
- return NULL;
-}
-
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{
@@ -269,12 +566,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
return true;
}
-static inline struct cgroup_subsys_state
- *mem_cgroup_css(struct mem_cgroup *memcg)
-{
- return NULL;
-}
-
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -293,10 +584,10 @@ static inline bool mem_cgroup_disabled(void)
return true;
}
-static inline int
+static inline bool
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
- return 1;
+ return true;
}
static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
@@ -330,6 +621,10 @@ static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}
+static inline void mem_cgroup_handle_over_high(void)
+{
+}
+
static inline void mem_cgroup_oom_enable(void)
{
}
@@ -382,6 +677,31 @@ enum {
OVER_LIMIT,
};
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
+struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
+ unsigned long *pheadroom, unsigned long *pdirty,
+ unsigned long *pwriteback);
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
+ unsigned long *pfilepages,
+ unsigned long *pheadroom,
+ unsigned long *pdirty,
+ unsigned long *pwriteback)
+{
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
@@ -399,8 +719,8 @@ static inline void sock_release_memcg(struct sock *sk)
extern struct static_key memcg_kmem_enabled_key;
extern int memcg_nr_cache_ids;
-extern void memcg_get_cache_ids(void);
-extern void memcg_put_cache_ids(void);
+void memcg_get_cache_ids(void);
+void memcg_put_cache_ids(void);
/*
* Helper macro to loop through all memcg-specific caches. Callers must still
@@ -415,7 +735,10 @@ static inline bool memcg_kmem_enabled(void)
return static_key_false(&memcg_kmem_enabled_key);
}
-bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return memcg->kmem_acct_active;
+}
/*
* In general, we'll do everything in our power to not incur in any overhead
@@ -428,88 +751,60 @@ bool memcg_kmem_is_active(struct mem_cgroup *memcg);
* conditions, but because they are pretty simple, they are expected to be
* fast.
*/
-bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
- int order);
-void __memcg_kmem_commit_charge(struct page *page,
- struct mem_cgroup *memcg, int order);
-void __memcg_kmem_uncharge_pages(struct page *page, int order);
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+ struct mem_cgroup *memcg);
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge(struct page *page, int order);
-int memcg_cache_id(struct mem_cgroup *memcg);
+/*
+ * Helper for accessing a memcg's index. It will be used as an index in the
+ * child cache array in kmem_cache, and also to derive its name. This function
+ * will return -1 when this is not a kmem-limited memcg.
+ */
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+ return memcg ? memcg->kmemcg_id : -1;
+}
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
-
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
- unsigned long nr_pages);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
-
-/**
- * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
- * @gfp: the gfp allocation flags.
- * @memcg: a pointer to the memcg this was charged against.
- * @order: allocation order.
- *
- * returns true if the memcg where the current task belongs can hold this
- * allocation.
- *
- * We return true automatically if this allocation is not to be accounted to
- * any memcg.
- */
-static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+static inline bool __memcg_kmem_bypass(gfp_t gfp)
{
if (!memcg_kmem_enabled())
return true;
-
if (gfp & __GFP_NOACCOUNT)
return true;
- /*
- * __GFP_NOFAIL allocations will move on even if charging is not
- * possible. Therefore we don't even try, and have this allocation
- * unaccounted. We could in theory charge it forcibly, but we hope
- * those allocations are rare, and won't be worth the trouble.
- */
- if (gfp & __GFP_NOFAIL)
- return true;
if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
return true;
-
- /* If the test is dying, just let it go. */
- if (unlikely(fatal_signal_pending(current)))
- return true;
-
- return __memcg_kmem_newpage_charge(gfp, memcg, order);
+ return false;
}
/**
- * memcg_kmem_uncharge_pages: uncharge pages from memcg
- * @page: pointer to struct page being freed
- * @order: allocation order.
+ * memcg_kmem_charge: charge a kmem page
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ *
+ * Returns 0 on success, an error code on failure.
*/
-static inline void
-memcg_kmem_uncharge_pages(struct page *page, int order)
+static __always_inline int memcg_kmem_charge(struct page *page,
+ gfp_t gfp, int order)
{
- if (memcg_kmem_enabled())
- __memcg_kmem_uncharge_pages(page, order);
+ if (__memcg_kmem_bypass(gfp))
+ return 0;
+ return __memcg_kmem_charge(page, gfp, order);
}
/**
- * memcg_kmem_commit_charge: embeds correct memcg in a page
- * @page: pointer to struct page recently allocated
- * @memcg: the memcg structure we charged against
- * @order: allocation order.
- *
- * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
- * failure of the allocation. if @page is NULL, this function will revert the
- * charges. Otherwise, it will commit @page to @memcg.
+ * memcg_kmem_uncharge: uncharge a kmem page
+ * @page: page to uncharge
+ * @order: allocation order
*/
-static inline void
-memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
{
- if (memcg_kmem_enabled() && memcg)
- __memcg_kmem_commit_charge(page, memcg, order);
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge(page, order);
}
/**
@@ -522,17 +817,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
- if (!memcg_kmem_enabled())
- return cachep;
- if (gfp & __GFP_NOACCOUNT)
- return cachep;
- if (gfp & __GFP_NOFAIL)
- return cachep;
- if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+ if (__memcg_kmem_bypass(gfp))
return cachep;
- if (unlikely(fatal_signal_pending(current)))
- return cachep;
-
return __memcg_kmem_get_cache(cachep);
}
@@ -541,13 +827,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep);
}
-
-static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
-{
- if (!memcg_kmem_enabled())
- return NULL;
- return __mem_cgroup_from_kmem(ptr);
-}
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -562,18 +841,12 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
return false;
}
-static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
-{
- return true;
-}
-
-static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
+ return 0;
}
-static inline void
-memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}
@@ -599,11 +872,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
-
-static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
-{
- return NULL;
-}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
-
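
Editor's note (usage sketch, not part of the patch): the refactor collapses
the old three-step kmem charge protocol (newpage_charge / commit_charge /
uncharge_pages) into one charge/uncharge pair gated by __memcg_kmem_bypass().
Hypothetical allocator sketch:

	#include <linux/gfp.h>
	#include <linux/memcontrol.h>

	static struct page *demo_alloc_accounted(gfp_t gfp, int order)
	{
		struct page *page = alloc_pages(gfp, order);

		if (!page)
			return NULL;

		/* A single call now both verifies and commits the charge. */
		if (memcg_kmem_charge(page, gfp, order)) {
			__free_pages(page, order);
			return NULL;
		}
		return page;
	}

	static void demo_free_accounted(struct page *page, int order)
	{
		memcg_kmem_uncharge(page, order);
		__free_pages(page, order);
	}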
diff --git a/kernel/include/linux/memory_hotplug.h b/kernel/include/linux/memory_hotplug.h
index 6ffa0ac7f..2ea574ff9 100644
--- a/kernel/include/linux/memory_hotplug.h
+++ b/kernel/include/linux/memory_hotplug.h
@@ -11,6 +11,7 @@ struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
+struct resource;
#ifdef CONFIG_MEMORY_HOTPLUG
@@ -266,8 +267,10 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
-extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
-extern int arch_add_memory(int nid, u64 start, u64 size);
+extern int add_memory_resource(int nid, struct resource *resource);
+extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
+ bool for_device);
+extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
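
Editor's note (usage sketch, not part of the patch): add_memory_resource()
serves callers that already own a struct resource describing the new RAM
span. Sketch only; real users obtain the resource from their hot-add source
(e.g. a balloon driver) and remain responsible for it on failure.

	#include <linux/ioport.h>
	#include <linux/memory_hotplug.h>

	/* Hypothetical: hot-add a discovered RAM range to node 0. */
	static int demo_hotadd(u64 start, u64 size)
	{
		struct resource *res;
		int ret;

		res = request_mem_region(start, size, "demo-ram");
		if (!res)
			return -EBUSY;

		ret = add_memory_resource(0, res);
		if (ret)
			release_mem_region(start, size);
		return ret;
	}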
diff --git a/kernel/include/linux/mfd/88pm80x.h b/kernel/include/linux/mfd/88pm80x.h
index 97cb283cc..d409ceb22 100644
--- a/kernel/include/linux/mfd/88pm80x.h
+++ b/kernel/include/linux/mfd/88pm80x.h
@@ -21,6 +21,7 @@ enum {
CHIP_INVALID = 0,
CHIP_PM800,
CHIP_PM805,
+ CHIP_PM860,
CHIP_MAX,
};
@@ -60,60 +61,60 @@ enum {
/* page 0 basic: slave adder 0x60 */
#define PM800_STATUS_1 (0x01)
-#define PM800_ONKEY_STS1 (1 << 0)
-#define PM800_EXTON_STS1 (1 << 1)
-#define PM800_CHG_STS1 (1 << 2)
-#define PM800_BAT_STS1 (1 << 3)
-#define PM800_VBUS_STS1 (1 << 4)
-#define PM800_LDO_PGOOD_STS1 (1 << 5)
-#define PM800_BUCK_PGOOD_STS1 (1 << 6)
+#define PM800_ONKEY_STS1 BIT(0)
+#define PM800_EXTON_STS1 BIT(1)
+#define PM800_CHG_STS1 BIT(2)
+#define PM800_BAT_STS1 BIT(3)
+#define PM800_VBUS_STS1 BIT(4)
+#define PM800_LDO_PGOOD_STS1 BIT(5)
+#define PM800_BUCK_PGOOD_STS1 BIT(6)
#define PM800_STATUS_2 (0x02)
-#define PM800_RTC_ALARM_STS2 (1 << 0)
+#define PM800_RTC_ALARM_STS2 BIT(0)
/* Wakeup Registers */
-#define PM800_WAKEUP1 (0x0D)
+#define PM800_WAKEUP1 (0x0D)
-#define PM800_WAKEUP2 (0x0E)
-#define PM800_WAKEUP2_INV_INT (1 << 0)
-#define PM800_WAKEUP2_INT_CLEAR (1 << 1)
-#define PM800_WAKEUP2_INT_MASK (1 << 2)
+#define PM800_WAKEUP2 (0x0E)
+#define PM800_WAKEUP2_INV_INT BIT(0)
+#define PM800_WAKEUP2_INT_CLEAR BIT(1)
+#define PM800_WAKEUP2_INT_MASK BIT(2)
-#define PM800_POWER_UP_LOG (0x10)
+#define PM800_POWER_UP_LOG (0x10)
/* Reference and low power registers */
#define PM800_LOW_POWER1 (0x20)
#define PM800_LOW_POWER2 (0x21)
-#define PM800_LOW_POWER_CONFIG3 (0x22)
-#define PM800_LOW_POWER_CONFIG4 (0x23)
+#define PM800_LOW_POWER_CONFIG3 (0x22)
+#define PM800_LOW_POWER_CONFIG4 (0x23)
/* GPIO register */
#define PM800_GPIO_0_1_CNTRL (0x30)
-#define PM800_GPIO0_VAL (1 << 0)
+#define PM800_GPIO0_VAL BIT(0)
#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
-#define PM800_GPIO1_VAL (1 << 4)
+#define PM800_GPIO1_VAL BIT(4)
#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
#define PM800_GPIO_2_3_CNTRL (0x31)
-#define PM800_GPIO2_VAL (1 << 0)
+#define PM800_GPIO2_VAL BIT(0)
#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
-#define PM800_GPIO3_VAL (1 << 4)
+#define PM800_GPIO3_VAL BIT(4)
#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
#define PM800_GPIO3_MODE_MASK 0x1F
#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
-#define PM800_GPIO_4_CNTRL (0x32)
-#define PM800_GPIO4_VAL (1 << 0)
+#define PM800_GPIO_4_CNTRL (0x32)
+#define PM800_GPIO4_VAL BIT(0)
#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
#define PM800_HEADSET_CNTRL (0x38)
-#define PM800_HEADSET_DET_EN (1 << 7)
-#define PM800_HSDET_SLP (1 << 1)
+#define PM800_HEADSET_DET_EN BIT(7)
+#define PM800_HSDET_SLP BIT(1)
/* PWM register */
-#define PM800_PWM1 (0x40)
-#define PM800_PWM2 (0x41)
-#define PM800_PWM3 (0x42)
-#define PM800_PWM4 (0x43)
+#define PM800_PWM1 (0x40)
+#define PM800_PWM2 (0x41)
+#define PM800_PWM3 (0x42)
+#define PM800_PWM4 (0x43)
/* RTC Registers */
#define PM800_RTC_CONTROL (0xD0)
@@ -123,55 +124,55 @@ enum {
#define PM800_RTC_MISC4 (0xE4)
#define PM800_RTC_MISC5 (0xE7)
/* bit definitions of RTC Register 1 (0xD0) */
-#define PM800_ALARM1_EN (1 << 0)
-#define PM800_ALARM_WAKEUP (1 << 4)
-#define PM800_ALARM (1 << 5)
-#define PM800_RTC1_USE_XO (1 << 7)
+#define PM800_ALARM1_EN BIT(0)
+#define PM800_ALARM_WAKEUP BIT(4)
+#define PM800_ALARM BIT(5)
+#define PM800_RTC1_USE_XO BIT(7)
/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
/* buck registers */
-#define PM800_SLEEP_BUCK1 (0x30)
+#define PM800_SLEEP_BUCK1 (0x30)
/* BUCK Sleep Mode Register 1: BUCK[1..4] */
-#define PM800_BUCK_SLP1 (0x5A)
-#define PM800_BUCK1_SLP1_SHIFT 0
-#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
+#define PM800_BUCK_SLP1 (0x5A)
+#define PM800_BUCK1_SLP1_SHIFT 0
+#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
/* page 2 GPADC: slave adder 0x02 */
#define PM800_GPADC_MEAS_EN1 (0x01)
-#define PM800_MEAS_EN1_VBAT (1 << 2)
+#define PM800_MEAS_EN1_VBAT BIT(2)
#define PM800_GPADC_MEAS_EN2 (0x02)
-#define PM800_MEAS_EN2_RFTMP (1 << 0)
-#define PM800_MEAS_GP0_EN (1 << 2)
-#define PM800_MEAS_GP1_EN (1 << 3)
-#define PM800_MEAS_GP2_EN (1 << 4)
-#define PM800_MEAS_GP3_EN (1 << 5)
-#define PM800_MEAS_GP4_EN (1 << 6)
+#define PM800_MEAS_EN2_RFTMP BIT(0)
+#define PM800_MEAS_GP0_EN BIT(2)
+#define PM800_MEAS_GP1_EN BIT(3)
+#define PM800_MEAS_GP2_EN BIT(4)
+#define PM800_MEAS_GP3_EN BIT(5)
+#define PM800_MEAS_GP4_EN BIT(6)
#define PM800_GPADC_MISC_CONFIG1 (0x05)
#define PM800_GPADC_MISC_CONFIG2 (0x06)
-#define PM800_GPADC_MISC_GPFSM_EN (1 << 0)
+#define PM800_GPADC_MISC_GPFSM_EN BIT(0)
#define PM800_GPADC_SLOW_MODE(x) (x << 3)
-#define PM800_GPADC_MISC_CONFIG3 (0x09)
-#define PM800_GPADC_MISC_CONFIG4 (0x0A)
+#define PM800_GPADC_MISC_CONFIG3 (0x09)
+#define PM800_GPADC_MISC_CONFIG4 (0x0A)
-#define PM800_GPADC_PREBIAS1 (0x0F)
+#define PM800_GPADC_PREBIAS1 (0x0F)
#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
-#define PM800_GPADC_PREBIAS2 (0x10)
+#define PM800_GPADC_PREBIAS2 (0x10)
-#define PM800_GP_BIAS_ENA1 (0x14)
-#define PM800_GPADC_GP_BIAS_EN0 (1 << 0)
-#define PM800_GPADC_GP_BIAS_EN1 (1 << 1)
-#define PM800_GPADC_GP_BIAS_EN2 (1 << 2)
-#define PM800_GPADC_GP_BIAS_EN3 (1 << 3)
+#define PM800_GP_BIAS_ENA1 (0x14)
+#define PM800_GPADC_GP_BIAS_EN0 BIT(0)
+#define PM800_GPADC_GP_BIAS_EN1 BIT(1)
+#define PM800_GPADC_GP_BIAS_EN2 BIT(2)
+#define PM800_GPADC_GP_BIAS_EN3 BIT(3)
#define PM800_GP_BIAS_OUT1 (0x15)
-#define PM800_BIAS_OUT_GP0 (1 << 0)
-#define PM800_BIAS_OUT_GP1 (1 << 1)
-#define PM800_BIAS_OUT_GP2 (1 << 2)
-#define PM800_BIAS_OUT_GP3 (1 << 3)
+#define PM800_BIAS_OUT_GP0 BIT(0)
+#define PM800_BIAS_OUT_GP1 BIT(1)
+#define PM800_BIAS_OUT_GP2 BIT(2)
+#define PM800_BIAS_OUT_GP3 BIT(3)
#define PM800_GPADC0_LOW_TH 0x20
#define PM800_GPADC1_LOW_TH 0x21
@@ -222,37 +223,37 @@ enum {
#define PM805_INT_STATUS1 (0x03)
-#define PM805_INT1_HP1_SHRT (1 << 0)
-#define PM805_INT1_HP2_SHRT (1 << 1)
-#define PM805_INT1_MIC_CONFLICT (1 << 2)
-#define PM805_INT1_CLIP_FAULT (1 << 3)
-#define PM805_INT1_LDO_OFF (1 << 4)
-#define PM805_INT1_SRC_DPLL_LOCK (1 << 5)
+#define PM805_INT1_HP1_SHRT BIT(0)
+#define PM805_INT1_HP2_SHRT BIT(1)
+#define PM805_INT1_MIC_CONFLICT BIT(2)
+#define PM805_INT1_CLIP_FAULT BIT(3)
+#define PM805_INT1_LDO_OFF BIT(4)
+#define PM805_INT1_SRC_DPLL_LOCK BIT(5)
#define PM805_INT_STATUS2 (0x04)
-#define PM805_INT2_MIC_DET (1 << 0)
-#define PM805_INT2_SHRT_BTN_DET (1 << 1)
-#define PM805_INT2_VOLM_BTN_DET (1 << 2)
-#define PM805_INT2_VOLP_BTN_DET (1 << 3)
-#define PM805_INT2_RAW_PLL_FAULT (1 << 4)
-#define PM805_INT2_FINE_PLL_FAULT (1 << 5)
+#define PM805_INT2_MIC_DET BIT(0)
+#define PM805_INT2_SHRT_BTN_DET BIT(1)
+#define PM805_INT2_VOLM_BTN_DET BIT(2)
+#define PM805_INT2_VOLP_BTN_DET BIT(3)
+#define PM805_INT2_RAW_PLL_FAULT BIT(4)
+#define PM805_INT2_FINE_PLL_FAULT BIT(5)
#define PM805_INT_MASK1 (0x05)
#define PM805_INT_MASK2 (0x06)
-#define PM805_SHRT_BTN_DET (1 << 1)
+#define PM805_SHRT_BTN_DET BIT(1)
/* number of status and int reg in a row */
#define PM805_INT_REG_NUM (2)
#define PM805_MIC_DET1 (0x07)
-#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET_EN_MIC_DET BIT(0)
#define PM805_MIC_DET2 (0x08)
-#define PM805_MIC_DET_STATUS1 (0x09)
+#define PM805_MIC_DET_STATUS1 (0x09)
-#define PM805_MIC_DET_STATUS3 (0x0A)
-#define PM805_AUTO_SEQ_STATUS1 (0x0B)
-#define PM805_AUTO_SEQ_STATUS2 (0x0C)
+#define PM805_MIC_DET_STATUS3 (0x0A)
+#define PM805_AUTO_SEQ_STATUS1 (0x0B)
+#define PM805_AUTO_SEQ_STATUS2 (0x0C)
#define PM805_ADC_SETTING1 (0x10)
#define PM805_ADC_SETTING2 (0x11)
@@ -261,7 +262,7 @@ enum {
#define PM805_ADC_GAIN2 (0x13)
#define PM805_DMIC_SETTING (0x15)
#define PM805_DWS_SETTING (0x16)
-#define PM805_MIC_CONFLICT_STS (0x17)
+#define PM805_MIC_CONFLICT_STS (0x17)
#define PM805_PDM_SETTING1 (0x20)
#define PM805_PDM_SETTING2 (0x21)
@@ -270,11 +271,11 @@ enum {
#define PM805_PDM_CONTROL2 (0x24)
#define PM805_PDM_CONTROL3 (0x25)
-#define PM805_HEADPHONE_SETTING (0x26)
-#define PM805_HEADPHONE_GAIN_A2A (0x27)
-#define PM805_HEADPHONE_SHORT_STATE (0x28)
-#define PM805_EARPHONE_SETTING (0x29)
-#define PM805_AUTO_SEQ_SETTING (0x2A)
+#define PM805_HEADPHONE_SETTING (0x26)
+#define PM805_HEADPHONE_GAIN_A2A (0x27)
+#define PM805_HEADPHONE_SHORT_STATE (0x28)
+#define PM805_EARPHONE_SETTING (0x29)
+#define PM805_AUTO_SEQ_SETTING (0x2A)
struct pm80x_rtc_pdata {
int vrtc;
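
Editor's note (sketch, not part of the patch): the (1 << n) to BIT(n) sweep
is mechanical; BIT(n) expands to (1UL << (n)), so every converted constant
keeps its value while single-bit fields stay grep-able. Trivial check in a
hypothetical function:

	#include <linux/bitops.h>
	#include <linux/bug.h>

	static void demo_bit_equivalence(void)
	{
		/* e.g. PM800_GPIO1_VAL: BIT(4) == (1 << 4) */
		BUILD_BUG_ON(BIT(4) != (1 << 4));
	}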
diff --git a/kernel/include/linux/mfd/arizona/core.h b/kernel/include/linux/mfd/arizona/core.h
index 16a498f48..79e607e2f 100644
--- a/kernel/include/linux/mfd/arizona/core.h
+++ b/kernel/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@ enum arizona_type {
WM5110 = 2,
WM8997 = 3,
WM8280 = 4,
+ WM8998 = 5,
+ WM1814 = 6,
};
#define ARIZONA_IRQ_GP1 0
@@ -117,6 +119,7 @@ struct arizona {
int num_core_supplies;
struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
struct regulator *dcvdd;
+ bool has_fully_powered_off;
struct arizona_pdata pdata;
@@ -153,9 +156,18 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name,
void arizona_free_irq(struct arizona *arizona, int irq, void *data);
int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
+#ifdef CONFIG_MFD_WM5102
int wm5102_patch(struct arizona *arizona);
+#else
+static inline int wm5102_patch(struct arizona *arizona)
+{
+ return 0;
+}
+#endif
+
int wm5110_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
+int wm8998_patch(struct arizona *arizona);
extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
bool mandatory);
diff --git a/kernel/include/linux/mfd/arizona/pdata.h b/kernel/include/linux/mfd/arizona/pdata.h
index 1789cb0f4..57b45caae 100644
--- a/kernel/include/linux/mfd/arizona/pdata.h
+++ b/kernel/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@ struct arizona_pdata {
* useful for systems where an I2S bus with multiple data
* lines is mastered.
*/
- int max_channels_clocked[ARIZONA_MAX_AIF];
+ unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
/** GPIO5 is used for jack detection */
bool jd_gpio5;
@@ -121,23 +121,29 @@ struct arizona_pdata {
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
+ /** Channel to use for headphone detection */
+ unsigned int hpdet_channel;
+
+ /** Use software comparison to determine mic presence */
+ bool micd_software_compare;
+
/** Extra debounce timeout used during initial mic detection (ms) */
- int micd_detect_debounce;
+ unsigned int micd_detect_debounce;
/** GPIO for mic detection polarity */
int micd_pol_gpio;
/** Mic detect ramp rate */
- int micd_bias_start_time;
+ unsigned int micd_bias_start_time;
/** Mic detect sample rate */
- int micd_rate;
+ unsigned int micd_rate;
/** Mic detect debounce level */
- int micd_dbtime;
+ unsigned int micd_dbtime;
/** Mic detect timeout (ms) */
- int micd_timeout;
+ unsigned int micd_timeout;
/** Force MICBIAS on for mic detect */
bool micd_force_micbias;
@@ -156,7 +162,12 @@ struct arizona_pdata {
/** MICBIAS configurations */
struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
- /** Mode of input structures */
+ /**
+ * Mode of input structures
+ * One of the ARIZONA_INMODE_xxx values
+ * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
+ * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
+ */
int inmode[ARIZONA_MAX_INPUT];
/** Mode for outputs */
@@ -173,6 +184,9 @@ struct arizona_pdata {
/** GPIO for primary IRQ (used for edge triggered emulation) */
int irq_gpio;
+
+ /** General purpose switch control */
+ unsigned int gpsw;
};
#endif
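
Editor's note (usage sketch, not part of the patch): board code fills the
widened unsigned fields as before. Values below are hypothetical, and the
ARIZONA_INMODE_* names follow the new inmode comment above; they are assumed,
not defined in this hunk.

	#include <linux/mfd/arizona/pdata.h>

	static struct arizona_pdata demo_pdata = {
		.max_channels_clocked	= { 2, 0, 0 },	/* limit AIF1 to 2 slots */
		.micd_detect_debounce	= 500,		/* ms */
		.hpdet_channel		= 1,		/* headphone detect channel */
		/* wm8998 layout: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B */
		.inmode	= { ARIZONA_INMODE_DMIC, ARIZONA_INMODE_SE, 0, 0 },
		.gpsw	= 0x3,				/* general purpose switch */
	};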
diff --git a/kernel/include/linux/mfd/arizona/registers.h b/kernel/include/linux/mfd/arizona/registers.h
index aacc10d77..cd7e78eae 100644
--- a/kernel/include/linux/mfd/arizona/registers.h
+++ b/kernel/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
#define ARIZONA_PWM_DRIVE_3 0x32
#define ARIZONA_WAKE_CONTROL 0x40
#define ARIZONA_SEQUENCE_CONTROL 0x41
+#define ARIZONA_SPARE_TRIGGERS 0x42
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
@@ -139,6 +140,7 @@
#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7
#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8
#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9
+#define ARIZONA_MIC_DETECT_4 0x2AB
#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
#define ARIZONA_ISOLATION_CONTROL 0x2CB
#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
@@ -225,17 +227,22 @@
#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
#define ARIZONA_DRE_ENABLE 0x440
+#define ARIZONA_DRE_CONTROL_1 0x441
#define ARIZONA_DRE_CONTROL_2 0x442
#define ARIZONA_DRE_CONTROL_3 0x443
+#define ARIZONA_EDRE_ENABLE 0x448
#define ARIZONA_DAC_AEC_CONTROL_1 0x450
+#define ARIZONA_DAC_AEC_CONTROL_2 0x451
#define ARIZONA_NOISE_GATE_CONTROL 0x458
#define ARIZONA_PDM_SPK1_CTRL_1 0x490
#define ARIZONA_PDM_SPK1_CTRL_2 0x491
#define ARIZONA_PDM_SPK2_CTRL_1 0x492
#define ARIZONA_PDM_SPK2_CTRL_2 0x493
+#define ARIZONA_HP_TEST_CTRL_13 0x49A
#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
+#define ARIZONA_HP_TEST_CTRL_1 0x4A4
#define ARIZONA_SPK_CTRL_2 0x4B5
#define ARIZONA_SPK_CTRL_3 0x4B6
#define ARIZONA_DAC_COMP_1 0x4DC
@@ -310,6 +317,10 @@
#define ARIZONA_AIF3_TX_ENABLES 0x599
#define ARIZONA_AIF3_RX_ENABLES 0x59A
#define ARIZONA_AIF3_FORCE_WRITE 0x59B
+#define ARIZONA_SPD1_TX_CONTROL 0x5C2
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
#define ARIZONA_SLIMBUS_RATES_1 0x5E5
#define ARIZONA_SLIMBUS_RATES_2 0x5E6
@@ -643,6 +654,10 @@
#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE 0x800
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME 0x801
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE 0x808
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME 0x809
#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
@@ -868,6 +883,7 @@
#define ARIZONA_GPIO5_CTRL 0xC04
#define ARIZONA_IRQ_CTRL_1 0xC0F
#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
+#define ARIZONA_GP_SWITCH_1 0xC18
#define ARIZONA_MISC_PAD_CTRL_1 0xC20
#define ARIZONA_MISC_PAD_CTRL_2 0xC21
#define ARIZONA_MISC_PAD_CTRL_3 0xC22
@@ -1049,6 +1065,16 @@
#define ARIZONA_CLOCK_CONTROL 0xF00
#define ARIZONA_ANC_SRC 0xF01
#define ARIZONA_DSP_STATUS 0xF02
+#define ARIZONA_ANC_COEFF_START 0xF08
+#define ARIZONA_ANC_COEFF_END 0xF12
+#define ARIZONA_FCL_FILTER_CONTROL 0xF15
+#define ARIZONA_FCL_ADC_REFORMATTER_CONTROL 0xF17
+#define ARIZONA_FCL_COEFF_START 0xF18
+#define ARIZONA_FCL_COEFF_END 0xF69
+#define ARIZONA_FCR_FILTER_CONTROL 0xF70
+#define ARIZONA_FCR_ADC_REFORMATTER_CONTROL 0xF72
+#define ARIZONA_FCR_COEFF_START 0xF73
+#define ARIZONA_FCR_COEFF_END 0xFC4
#define ARIZONA_DSP1_CONTROL_1 0x1100
#define ARIZONA_DSP1_CLOCKING_1 0x1101
#define ARIZONA_DSP1_STATUS_1 0x1104
@@ -1169,6 +1195,13 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
+#define ARIZONA_FRF_COEFF_1 0x1700
+#define ARIZONA_FRF_COEFF_2 0x1701
+#define ARIZONA_FRF_COEFF_3 0x1702
+#define ARIZONA_FRF_COEFF_4 0x1703
+#define ARIZONA_V2_DAC_COMP_1 0x1704
+#define ARIZONA_V2_DAC_COMP_2 0x1705
+
/*
* Field Definitions.
@@ -1431,6 +1464,42 @@
#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
/*
+ * R66 (0x42) - Spare Triggers
+ */
+#define ARIZONA_WS_TRG8 0x0080 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_MASK 0x0080 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_SHIFT 7 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_WIDTH 1 /* WS_TRG8 */
+#define ARIZONA_WS_TRG7 0x0040 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_MASK 0x0040 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_SHIFT 6 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_WIDTH 1 /* WS_TRG7 */
+#define ARIZONA_WS_TRG6 0x0020 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_MASK 0x0020 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_SHIFT 5 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_WIDTH 1 /* WS_TRG6 */
+#define ARIZONA_WS_TRG5 0x0010 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_MASK 0x0010 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_SHIFT 4 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_WIDTH 1 /* WS_TRG5 */
+#define ARIZONA_WS_TRG4 0x0008 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_MASK 0x0008 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_SHIFT 3 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_WIDTH 1 /* WS_TRG4 */
+#define ARIZONA_WS_TRG3 0x0004 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_MASK 0x0004 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_SHIFT 2 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_WIDTH 1 /* WS_TRG3 */
+#define ARIZONA_WS_TRG2 0x0002 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_MASK 0x0002 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_SHIFT 1 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_WIDTH 1 /* WS_TRG2 */
+#define ARIZONA_WS_TRG1 0x0001 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_MASK 0x0001 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_SHIFT 0 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_WIDTH 1 /* WS_TRG1 */
+
+/*
* R97 (0x61) - Sample Rate Sequence Select 1
*/
#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2301,9 +2370,9 @@
#define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */
#define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */
#define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */
-#define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */
-#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */
-#define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */
+#define ARIZONA_ACCDET_MODE_MASK 0x0007 /* ACCDET_MODE - [2:0] */
+#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [2:0] */
+#define ARIZONA_ACCDET_MODE_WIDTH 3 /* ACCDET_MODE - [2:0] */
/*
* R667 (0x29B) - Headphone Detect 1
@@ -2325,6 +2394,9 @@
#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
+#define WM8998_HP_RATE_MASK 0x0006 /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_SHIFT 1 /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_WIDTH 2 /* HP_RATE - [2:1] */
#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
@@ -2413,6 +2485,16 @@
#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
/*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK 0xFF00 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK 0x007F /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT 0 /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH 7 /* MICDET_ADCVAL - [6:0] */
+
+/*
* R707 (0x2C3) - Mic noise mix control 1
*/
#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
@@ -2515,9 +2597,12 @@
#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
-#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */
#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
@@ -2525,6 +2610,12 @@
/*
* R785 (0x311) - ADC Digital Volume 1L
*/
+#define ARIZONA_IN1L_SRC_MASK 0x4000 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SHIFT 14 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_WIDTH 1 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SE_MASK 0x2000 /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_SHIFT 13 /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_WIDTH 1 /* IN1L_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2557,6 +2648,12 @@
/*
* R789 (0x315) - ADC Digital Volume 1R
*/
+#define ARIZONA_IN1R_SRC_MASK 0x4000 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SHIFT 14 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_WIDTH 1 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SE_MASK 0x2000 /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_SHIFT 13 /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_WIDTH 1 /* IN1R_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2588,9 +2685,12 @@
#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
-#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */
#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
@@ -2598,6 +2698,12 @@
/*
* R793 (0x319) - ADC Digital Volume 2L
*/
+#define ARIZONA_IN2L_SRC_MASK 0x4000 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SHIFT 14 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_WIDTH 1 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SE_MASK 0x2000 /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_SHIFT 13 /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_WIDTH 1 /* IN2L_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2661,9 +2767,12 @@
#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
-#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */
#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
@@ -3403,11 +3512,45 @@
#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
/*
+ * R1088 (0x440) - DRE Enable (WM8998)
+ */
+#define WM8998_DRE3L_ENA 0x0020 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_MASK 0x0020 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_SHIFT 5 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */
+#define WM8998_DRE2L_ENA 0x0008 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_MASK 0x0008 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_SHIFT 3 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */
+#define WM8998_DRE2R_ENA 0x0004 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_MASK 0x0004 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_SHIFT 2 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */
+#define WM8998_DRE1L_ENA 0x0002 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_MASK 0x0002 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_SHIFT 1 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
+#define WM8998_DRE1R_ENA 0x0001 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_MASK 0x0001 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_SHIFT 0 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */
+
+/*
+ * R1089 (0x441) - DRE Control 1
+ */
+#define ARIZONA_DRE_ENV_TC_FAST_MASK 0x0F00 /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_SHIFT 8 /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_WIDTH 4 /* DRE_ENV_TC_FAST - [11:8] */
+
+/*
* R1090 (0x442) - DRE Control 2
*/
#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */
#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */
#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK 0x000F /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT 0 /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH 4 /* DRE_ALOG_VOL_DELAY - [3:0] */
/*
* R1091 (0x443) - DRE Control 3
@@ -3419,6 +3562,49 @@
#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */
#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */
+/*
+ * R1096 (0x448) - EDRE Enable
+ */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA 0x0200 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK 0x0200 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT 9 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH 1 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA 0x0100 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK 0x0100 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT 8 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH 1 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA 0x0080 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK 0x0080 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT 7 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH 1 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA 0x0040 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK 0x0040 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT 6 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH 1 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA 0x0020 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK 0x0020 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT 5 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH 1 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA 0x0010 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK 0x0010 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT 4 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH 1 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA 0x0008 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK 0x0008 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT 3 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH 1 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA 0x0004 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK 0x0004 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT 2 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH 1 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA 0x0002 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK 0x0002 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT 1 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH 1 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA 0x0001 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK 0x0001 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT 0 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH 1 /* EDRE_OUT1R_THR1_ENA */
+
/*
* R1104 (0x450) - DAC AEC Control 1
*/
@@ -3527,6 +3713,13 @@
#define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */
/*
+ * R1188 (0x4A4) - HP Test Ctrl 1
+ */
+#define ARIZONA_HP1_TST_CAP_SEL_MASK 0x0003 /* HP1_TST_CAP_SEL - [1:0] */
+#define ARIZONA_HP1_TST_CAP_SEL_SHIFT 0 /* HP1_TST_CAP_SEL - [1:0] */
+#define ARIZONA_HP1_TST_CAP_SEL_WIDTH 2 /* HP1_TST_CAP_SEL - [1:0] */
+
+/*
* R1244 (0x4DC) - DAC comp 1
*/
#define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */
@@ -4299,6 +4492,86 @@
#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
/*
+ * R1474 (0x5C2) - SPD1 TX Control
+ */
+#define ARIZONA_SPD1_VAL2 0x2000 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_MASK 0x2000 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_SHIFT 13 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_WIDTH 1 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL1 0x1000 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_MASK 0x1000 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_SHIFT 12 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_WIDTH 1 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_RATE_MASK 0x00F0 /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_SHIFT 4 /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_WIDTH 4 /* SPD1_RATE */
+#define ARIZONA_SPD1_ENA 0x0001 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_MASK 0x0001 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_SHIFT 0 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_WIDTH 1 /* SPD1_ENA */
+
+/*
+ * R1475 (0x5C3) - SPD1 TX Channel Status 1
+ */
+#define ARIZONA_SPD1_CATCODE_MASK 0xFF00 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_SHIFT 8 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_WIDTH 8 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CHSTMODE_MASK 0x00C0 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_SHIFT 6 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_WIDTH 2 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_PREEMPH_MASK 0x0038 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_SHIFT 3 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_WIDTH 3 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_NOCOPY 0x0004 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_MASK 0x0004 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_SHIFT 2 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_WIDTH 1 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOAUDIO 0x0002 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_MASK 0x0002 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_SHIFT 1 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_WIDTH 1 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_PRO 0x0001 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_MASK 0x0001 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_SHIFT 0 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_WIDTH 1 /* SPD1_PRO */
+
+/*
+ * R1476 (0x5C4) - SPD1 TX Channel Status 2
+ */
+#define ARIZONA_SPD1_FREQ_MASK 0xF000 /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_SHIFT 12 /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_WIDTH 4 /* SPD1_FREQ */
+#define ARIZONA_SPD1_CHNUM2_MASK 0x0F00 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_SHIFT 8 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_WIDTH 4 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM1_MASK 0x00F0 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_SHIFT 4 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_WIDTH 4 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_SRCNUM_MASK 0x000F /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_SHIFT 0 /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_WIDTH 4 /* SPD1_SRCNUM */
+
+/*
+ * R1477 (0x5C5) - SPD1 TX Channel Status 3
+ */
+#define ARIZONA_SPD1_ORGSAMP_MASK 0x0F00 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_SHIFT 8 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_WIDTH 4 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_TXWL_MASK 0x00E0 /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_SHIFT 5 /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_WIDTH 3 /* SPD1_TXWL */
+#define ARIZONA_SPD1_MAXWL 0x0010 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_MASK 0x0010 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_SHIFT 4 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_WIDTH 1 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_CS31_30_MASK 0x000C /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_SHIFT 2 /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_WIDTH 2 /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CLKACU_MASK 0x0003 /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_SHIFT 0 /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_WIDTH 2 /* SPD1_CLKACU */
+
+/*
* R1507 (0x5E3) - SLIMbus Framer Ref Gear
*/
#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
@@ -4553,6 +4826,13 @@
#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
/*
+ * R3096 (0xC18) - GP Switch 1
+ */
+#define ARIZONA_SW1_MODE_MASK 0x0003 /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_SHIFT 0 /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_WIDTH 2 /* SW1_MODE - [1:0] */
+
+/*
* R3104 (0xC20) - Misc Pad Ctrl 1
*/
#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
@@ -6292,6 +6572,10 @@
/*
* R3366 (0xD26) - Interrupt Raw Status 8
*/
+#define ARIZONA_SPDIF_OVERCLOCKED_STS 0x8000 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK 0x8000 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT 15 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH 1 /* SPDIF_OVERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
@@ -7777,6 +8061,66 @@
#define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */
/*
+ * R3840 (0xF00) - Clock Control
+ */
+#define ARIZONA_EXT_NG_SEL_CLR 0x0080 /* EXT_NG_SEL_CLR */
+#define ARIZONA_EXT_NG_SEL_CLR_MASK 0x0080 /* EXT_NG_SEL_CLR */
+#define ARIZONA_EXT_NG_SEL_CLR_SHIFT 7 /* EXT_NG_SEL_CLR */
+#define ARIZONA_EXT_NG_SEL_CLR_WIDTH 1 /* EXT_NG_SEL_CLR */
+#define ARIZONA_EXT_NG_SEL_SET 0x0040 /* EXT_NG_SEL_SET */
+#define ARIZONA_EXT_NG_SEL_SET_MASK 0x0040 /* EXT_NG_SEL_SET */
+#define ARIZONA_EXT_NG_SEL_SET_SHIFT 6 /* EXT_NG_SEL_SET */
+#define ARIZONA_EXT_NG_SEL_SET_WIDTH 1 /* EXT_NG_SEL_SET */
+#define ARIZONA_CLK_R_ENA_CLR 0x0020 /* CLK_R_ENA_CLR */
+#define ARIZONA_CLK_R_ENA_CLR_MASK 0x0020 /* CLK_R_ENA_CLR */
+#define ARIZONA_CLK_R_ENA_CLR_SHIFT 5 /* CLK_R_ENA_CLR */
+#define ARIZONA_CLK_R_ENA_CLR_WIDTH 1 /* CLK_R_ENA_CLR */
+#define ARIZONA_CLK_R_ENA_SET 0x0010 /* CLK_R_ENA_SET */
+#define ARIZONA_CLK_R_ENA_SET_MASK 0x0010 /* CLK_R_ENA_SET */
+#define ARIZONA_CLK_R_ENA_SET_SHIFT 4 /* CLK_R_ENA_SET */
+#define ARIZONA_CLK_R_ENA_SET_WIDTH 1 /* CLK_R_ENA_SET */
+#define ARIZONA_CLK_NG_ENA_CLR 0x0008 /* CLK_NG_ENA_CLR */
+#define ARIZONA_CLK_NG_ENA_CLR_MASK 0x0008 /* CLK_NG_ENA_CLR */
+#define ARIZONA_CLK_NG_ENA_CLR_SHIFT 3 /* CLK_NG_ENA_CLR */
+#define ARIZONA_CLK_NG_ENA_CLR_WIDTH 1 /* CLK_NG_ENA_CLR */
+#define ARIZONA_CLK_NG_ENA_SET 0x0004 /* CLK_NG_ENA_SET */
+#define ARIZONA_CLK_NG_ENA_SET_MASK 0x0004 /* CLK_NG_ENA_SET */
+#define ARIZONA_CLK_NG_ENA_SET_SHIFT 2 /* CLK_NG_ENA_SET */
+#define ARIZONA_CLK_NG_ENA_SET_WIDTH 1 /* CLK_NG_ENA_SET */
+#define ARIZONA_CLK_L_ENA_CLR 0x0002 /* CLK_L_ENA_CLR */
+#define ARIZONA_CLK_L_ENA_CLR_MASK 0x0002 /* CLK_L_ENA_CLR */
+#define ARIZONA_CLK_L_ENA_CLR_SHIFT 1 /* CLK_L_ENA_CLR */
+#define ARIZONA_CLK_L_ENA_CLR_WIDTH 1 /* CLK_L_ENA_CLR */
+#define ARIZONA_CLK_L_ENA_SET 0x0001 /* CLK_L_ENA_SET */
+#define ARIZONA_CLK_L_ENA_SET_MASK 0x0001 /* CLK_L_ENA_SET */
+#define ARIZONA_CLK_L_ENA_SET_SHIFT 0 /* CLK_L_ENA_SET */
+#define ARIZONA_CLK_L_ENA_SET_WIDTH 1 /* CLK_L_ENA_SET */
+
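The paired *_ENA_SET/*_ENA_CLR bits above read as a write-one-to-set / write-one-to-clear scheme, which lets a single register write flip one enable without a read-modify-write cycle. A hedged sketch (the helper name is illustrative):

/* Illustrative: toggle the left clock enable via the set/clear pair. */
static int arizona_clk_l_enable(struct regmap *regmap, bool enable)
{
	unsigned int val = enable ? ARIZONA_CLK_L_ENA_SET : ARIZONA_CLK_L_ENA_CLR;

	return regmap_write(regmap, 0xF00, val);	/* R3840 - Clock Control */
}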
+/*
+ * R3841 (0xF01) - ANC SRC
+ */
+#define ARIZONA_IN_RXANCR_SEL_MASK 0x0070 /* IN_RXANCR_SEL - [6:4] */
+#define ARIZONA_IN_RXANCR_SEL_SHIFT 4 /* IN_RXANCR_SEL - [6:4] */
+#define ARIZONA_IN_RXANCR_SEL_WIDTH 3 /* IN_RXANCR_SEL - [6:4] */
+#define ARIZONA_IN_RXANCL_SEL_MASK 0x0007 /* IN_RXANCL_SEL - [2:0] */
+#define ARIZONA_IN_RXANCL_SEL_SHIFT 0 /* IN_RXANCL_SEL - [2:0] */
+#define ARIZONA_IN_RXANCL_SEL_WIDTH 3 /* IN_RXANCL_SEL - [2:0] */
+
+/*
+ * R3863 (0xF17) - FCL ADC Reformatter Control
+ */
+#define ARIZONA_FCL_MIC_MODE_SEL 0x000C /* FCL_MIC_MODE_SEL - [3:2] */
+#define ARIZONA_FCL_MIC_MODE_SEL_SHIFT 2 /* FCL_MIC_MODE_SEL - [3:2] */
+#define ARIZONA_FCL_MIC_MODE_SEL_WIDTH 2 /* FCL_MIC_MODE_SEL - [3:2] */
+
+/*
+ * R3954 (0xF72) - FCR ADC Reformatter Control
+ */
+#define ARIZONA_FCR_MIC_MODE_SEL 0x000C /* FCR_MIC_MODE_SEL - [3:2] */
+#define ARIZONA_FCR_MIC_MODE_SEL_SHIFT 2 /* FCR_MIC_MODE_SEL - [3:2] */
+#define ARIZONA_FCR_MIC_MODE_SEL_WIDTH 2 /* FCR_MIC_MODE_SEL - [3:2] */
+
+/*
* R4352 (0x1100) - DSP1 Control 1
*/
#define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */
diff --git a/kernel/include/linux/mfd/axp20x.h b/kernel/include/linux/mfd/axp20x.h
index dfabd6db7..b24c771ce 100644
--- a/kernel/include/linux/mfd/axp20x.h
+++ b/kernel/include/linux/mfd/axp20x.h
@@ -11,9 +11,13 @@
#ifndef __LINUX_MFD_AXP20X_H
#define __LINUX_MFD_AXP20X_H
+#include <linux/regmap.h>
+
enum {
- AXP202_ID = 0,
+ AXP152_ID = 0,
+ AXP202_ID,
AXP209_ID,
+ AXP221_ID,
AXP288_ID,
NR_AXP20X_VARIANTS,
};
@@ -21,6 +25,24 @@ enum {
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
+#define AXP152_PWR_OP_MODE 0x01
+#define AXP152_LDO3456_DC1234_CTRL 0x12
+#define AXP152_ALDO_OP_MODE 0x13
+#define AXP152_LDO0_CTRL 0x15
+#define AXP152_DCDC2_V_OUT 0x23
+#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC1_V_OUT 0x26
+#define AXP152_DCDC3_V_OUT 0x27
+#define AXP152_ALDO12_V_OUT 0x28
+#define AXP152_DLDO1_V_OUT 0x29
+#define AXP152_DLDO2_V_OUT 0x2a
+#define AXP152_DCDC4_V_OUT 0x2b
+#define AXP152_V_OFF 0x31
+#define AXP152_OFF_CTRL 0x32
+#define AXP152_PEK_KEY 0x36
+#define AXP152_DCDC_FREQ 0x37
+#define AXP152_DCDC_MODE 0x80
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -45,7 +67,36 @@ enum {
#define AXP20X_V_LTF_DISCHRG 0x3c
#define AXP20X_V_HTF_DISCHRG 0x3d
+#define AXP22X_PWR_OUT_CTRL1 0x10
+#define AXP22X_PWR_OUT_CTRL2 0x12
+#define AXP22X_PWR_OUT_CTRL3 0x13
+#define AXP22X_DLDO1_V_OUT 0x15
+#define AXP22X_DLDO2_V_OUT 0x16
+#define AXP22X_DLDO3_V_OUT 0x17
+#define AXP22X_DLDO4_V_OUT 0x18
+#define AXP22X_ELDO1_V_OUT 0x19
+#define AXP22X_ELDO2_V_OUT 0x1a
+#define AXP22X_ELDO3_V_OUT 0x1b
+#define AXP22X_DC5LDO_V_OUT 0x1c
+#define AXP22X_DCDC1_V_OUT 0x21
+#define AXP22X_DCDC2_V_OUT 0x22
+#define AXP22X_DCDC3_V_OUT 0x23
+#define AXP22X_DCDC4_V_OUT 0x24
+#define AXP22X_DCDC5_V_OUT 0x25
+#define AXP22X_DCDC23_V_RAMP_CTRL 0x27
+#define AXP22X_ALDO1_V_OUT 0x28
+#define AXP22X_ALDO2_V_OUT 0x29
+#define AXP22X_ALDO3_V_OUT 0x2a
+#define AXP22X_CHRG_CTRL3 0x35
+
/* Interrupt */
+#define AXP152_IRQ1_EN 0x40
+#define AXP152_IRQ2_EN 0x41
+#define AXP152_IRQ3_EN 0x42
+#define AXP152_IRQ1_STATE 0x48
+#define AXP152_IRQ2_STATE 0x49
+#define AXP152_IRQ3_STATE 0x4a
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -100,7 +151,23 @@ enum {
#define AXP20X_VBUS_MON 0x8b
#define AXP20X_OVER_TMP 0x8f
+#define AXP22X_PWREN_CTRL1 0x8c
+#define AXP22X_PWREN_CTRL2 0x8d
+
/* GPIO */
+#define AXP152_GPIO0_CTRL 0x90
+#define AXP152_GPIO1_CTRL 0x91
+#define AXP152_GPIO2_CTRL 0x92
+#define AXP152_GPIO3_CTRL 0x93
+#define AXP152_LDOGPIO2_V_OUT 0x96
+#define AXP152_GPIO_INPUT 0x97
+#define AXP152_PWM0_FREQ_X 0x98
+#define AXP152_PWM0_FREQ_Y 0x99
+#define AXP152_PWM0_DUTY_CYCLE 0x9a
+#define AXP152_PWM1_FREQ_X 0x9b
+#define AXP152_PWM1_FREQ_Y 0x9c
+#define AXP152_PWM1_DUTY_CYCLE 0x9d
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -108,6 +175,11 @@ enum {
#define AXP20X_GPIO20_SS 0x94
#define AXP20X_GPIO3_CTRL 0x95
+#define AXP22X_LDO_IO0_V_OUT 0x91
+#define AXP22X_LDO_IO1_V_OUT 0x93
+#define AXP22X_GPIO_STATE 0x94
+#define AXP22X_GPIO_PULL_DOWN 0x95
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -120,6 +192,15 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* OCV */
+#define AXP20X_RDC_H 0xba
+#define AXP20X_RDC_L 0xbb
+#define AXP20X_OCV(m) (0xc0 + (m))
+#define AXP20X_OCV_MAX 0xf
+
+/* AXP22X specific registers */
+#define AXP22X_BATLOW_THRES1 0xe6
+
/* AXP288 specific registers */
#define AXP288_PMIC_ADC_H 0x56
#define AXP288_PMIC_ADC_L 0x57
@@ -158,8 +239,52 @@ enum {
AXP20X_REG_ID_MAX,
};
+enum {
+ AXP22X_DCDC1 = 0,
+ AXP22X_DCDC2,
+ AXP22X_DCDC3,
+ AXP22X_DCDC4,
+ AXP22X_DCDC5,
+ AXP22X_DC1SW,
+ AXP22X_DC5LDO,
+ AXP22X_ALDO1,
+ AXP22X_ALDO2,
+ AXP22X_ALDO3,
+ AXP22X_ELDO1,
+ AXP22X_ELDO2,
+ AXP22X_ELDO3,
+ AXP22X_DLDO1,
+ AXP22X_DLDO2,
+ AXP22X_DLDO3,
+ AXP22X_DLDO4,
+ AXP22X_RTC_LDO,
+ AXP22X_LDO_IO0,
+ AXP22X_LDO_IO1,
+ AXP22X_REG_ID_MAX,
+};
+
/* IRQs */
enum {
+ AXP152_IRQ_LDO0IN_CONNECT = 1,
+ AXP152_IRQ_LDO0IN_REMOVAL,
+ AXP152_IRQ_ALDO0IN_CONNECT,
+ AXP152_IRQ_ALDO0IN_REMOVAL,
+ AXP152_IRQ_DCDC1_V_LOW,
+ AXP152_IRQ_DCDC2_V_LOW,
+ AXP152_IRQ_DCDC3_V_LOW,
+ AXP152_IRQ_DCDC4_V_LOW,
+ AXP152_IRQ_PEK_SHORT,
+ AXP152_IRQ_PEK_LONG,
+ AXP152_IRQ_TIMER,
+ AXP152_IRQ_PEK_RIS_EDGE,
+ AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_GPIO3_INPUT,
+ AXP152_IRQ_GPIO2_INPUT,
+ AXP152_IRQ_GPIO1_INPUT,
+ AXP152_IRQ_GPIO0_INPUT,
+};
+
+enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
AXP20X_IRQ_ACIN_REMOVAL,
@@ -199,6 +324,34 @@ enum {
AXP20X_IRQ_GPIO0_INPUT,
};
+enum axp22x_irqs {
+ AXP22X_IRQ_ACIN_OVER_V = 1,
+ AXP22X_IRQ_ACIN_PLUGIN,
+ AXP22X_IRQ_ACIN_REMOVAL,
+ AXP22X_IRQ_VBUS_OVER_V,
+ AXP22X_IRQ_VBUS_PLUGIN,
+ AXP22X_IRQ_VBUS_REMOVAL,
+ AXP22X_IRQ_VBUS_V_LOW,
+ AXP22X_IRQ_BATT_PLUGIN,
+ AXP22X_IRQ_BATT_REMOVAL,
+ AXP22X_IRQ_BATT_ENT_ACT_MODE,
+ AXP22X_IRQ_BATT_EXIT_ACT_MODE,
+ AXP22X_IRQ_CHARG,
+ AXP22X_IRQ_CHARG_DONE,
+ AXP22X_IRQ_BATT_TEMP_HIGH,
+ AXP22X_IRQ_BATT_TEMP_LOW,
+ AXP22X_IRQ_DIE_TEMP_HIGH,
+ AXP22X_IRQ_PEK_SHORT,
+ AXP22X_IRQ_PEK_LONG,
+ AXP22X_IRQ_LOW_PWR_LVL1,
+ AXP22X_IRQ_LOW_PWR_LVL2,
+ AXP22X_IRQ_TIMER,
+ AXP22X_IRQ_PEK_RIS_EDGE,
+ AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_GPIO1_INPUT,
+ AXP22X_IRQ_GPIO0_INPUT,
+};
+
enum axp288_irqs {
AXP288_IRQ_VBUS_FALL = 2,
AXP288_IRQ_VBUS_RISE,
@@ -275,4 +428,38 @@ struct axp20x_fg_pdata {
int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
};
+struct axp20x_chrg_pdata {
+ int max_cc;
+ int max_cv;
+ int def_cc;
+ int def_cv;
+};
+
+struct axp288_extcon_pdata {
+ /* GPIO pin control to switch D+/D- lines between PMIC and SoC */
+ struct gpio_desc *gpio_mux_cntl;
+};
+
+/* generic helper function for reading 9-16 bit wide regs */
+static inline int axp20x_read_variable_width(struct regmap *regmap,
+ unsigned int reg, unsigned int width)
+{
+ unsigned int reg_val, result;
+ int err;
+
+ err = regmap_read(regmap, reg, &reg_val);
+ if (err)
+ return err;
+
+ result = reg_val << (width - 8);
+
+ err = regmap_read(regmap, reg + 1, &reg_val);
+ if (err)
+ return err;
+
+ result |= reg_val;
+
+ return result;
+}
+
#endif /* __LINUX_MFD_AXP20X_H */
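A hedged usage sketch for the helper above, reading a 12-bit value spread across a high/low register pair; the register address and width here are illustrative, not taken from this header, and an axp20x device structure exposing its regmap is assumed:

/* Read a 12-bit ADC pair; a negative return is a regmap error code. */
int val = axp20x_read_variable_width(axp20x->regmap, 0x78, 12);
if (val < 0)
	return val;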
diff --git a/kernel/include/linux/mfd/core.h b/kernel/include/linux/mfd/core.h
index a76bc100b..27dac3ff1 100644
--- a/kernel/include/linux/mfd/core.h
+++ b/kernel/include/linux/mfd/core.h
@@ -18,6 +18,12 @@
struct irq_domain;
+/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
+struct mfd_cell_acpi_match {
+ const char *pnpid;
+ const unsigned long long adr;
+};
+
/*
* This struct describes the MFD part ("cell").
* After registration the copy of this structure will become the platform data
@@ -44,8 +50,8 @@ struct mfd_cell {
*/
const char *of_compatible;
- /* Matches ACPI PNP id, either _HID or _CID */
- const char *acpi_pnpid;
+ /* Matches ACPI */
+ const struct mfd_cell_acpi_match *acpi_match;
/*
* These resources can be specified relative to the parent device.
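With acpi_match replacing acpi_pnpid, a cell points at a small match descriptor rather than a bare string, leaving room for matching on ACPI _ADR as well. A sketch of wiring a cell up this way; every name below is illustrative:

static const struct mfd_cell_acpi_match my_acpi_match = {
	.pnpid = "ABCD0001",		/* hypothetical _HID/_CID */
};

static const struct mfd_cell my_cells[] = {
	{
		.name = "my-subdevice",
		.acpi_match = &my_acpi_match,
	},
};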
diff --git a/kernel/include/linux/mfd/cros_ec.h b/kernel/include/linux/mfd/cros_ec.h
index 324a34683..494682ce4 100644
--- a/kernel/include/linux/mfd/cros_ec.h
+++ b/kernel/include/linux/mfd/cros_ec.h
@@ -17,10 +17,29 @@
#define __LINUX_MFD_CROS_EC_H
#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/mutex.h>
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by request/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ */
+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 2
+
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
*/
@@ -42,8 +61,7 @@ enum {
* @outsize: Outgoing length in bytes
* @insize: Max number of bytes to accept from EC
* @result: EC's response to the command (separate from communication failure)
- * @outdata: Outgoing data to EC
- * @indata: Where to put the incoming data from EC
+ * @data: Where to put the incoming data from EC and outgoing data to EC
*/
struct cros_ec_command {
uint32_t version;
@@ -51,18 +69,14 @@ struct cros_ec_command {
uint32_t outsize;
uint32_t insize;
uint32_t result;
- uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE];
- uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE];
+ uint8_t data[0];
};
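Replacing the fixed outdata/indata buffers with a single flexible array means callers now allocate the header and payload together, sized for the larger of the two directions, and the same bytes are reused for the response. A minimal sketch (the command, sizes and error handling are illustrative):

struct cros_ec_command *msg;
int ret;

msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
if (!msg)
	return -ENOMEM;

msg->version = 0;
msg->command = EC_CMD_HELLO;
msg->outsize = outsize;
msg->insize = insize;
memcpy(msg->data, out_payload, outsize);	/* request bytes */

ret = ec_dev->cmd_xfer(ec_dev, msg);		/* response lands in msg->data */
kfree(msg);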
/**
* struct cros_ec_device - Information about a ChromeOS EC device
*
- * @ec_name: name of EC device (e.g. 'chromeos-ec')
* @phys_name: name of physical comms layer (e.g. 'i2c-4')
* @dev: Device pointer for physical comms device
- * @vdev: Device pointer for virtual comms device
- * @cdev: Character device structure for virtual comms device
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
* @cmd_readmem: direct read of the EC memory-mapped region, if supported
@@ -74,6 +88,7 @@ struct cros_ec_command {
*
* @priv: Private data
* @irq: Interrupt to use
+ * @id: Device id
* @din: input buffer (for data from EC)
* @dout: output buffer (for data to EC)
* \note
@@ -85,41 +100,72 @@ struct cros_ec_command {
* to using dword.
* @din_size: size of din buffer to allocate (zero to use static din)
* @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @parent: pointer to parent device (e.g. i2c or spi device)
* @wake_enabled: true if this device can wake the system from sleep
* @cmd_xfer: send command to EC and get response
* Returns the number of bytes received if the communication succeeded, but
* that doesn't mean the EC was happy with the command. The caller
* should check msg.result for the EC's result code.
+ * @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
- const char *ec_name;
const char *phys_name;
struct device *dev;
- struct device *vdev;
- struct cdev cdev;
bool was_wake_device;
struct class *cros_class;
int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest);
/* These are used to implement the platform-specific interface */
+ u16 max_request;
+ u16 max_response;
+ u16 max_passthru;
+ u16 proto_version;
void *priv;
int irq;
- uint8_t *din;
- uint8_t *dout;
+ u8 *din;
+ u8 *dout;
int din_size;
int dout_size;
- struct device *parent;
bool wake_enabled;
int (*cmd_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
+ int (*pkt_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
struct mutex lock;
};
+/**
+ * struct cros_ec_platform - ChromeOS EC platform information
+ *
+ * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: offset to apply for each command. Set when
+ * registering a devicde behind another one.
+ */
+struct cros_ec_platform {
+ const char *ec_name;
+ u16 cmd_offset;
+};
+
+/**
+ * struct cros_ec_dev - ChromeOS EC device entry point
+ *
+ * @class_dev: Device structure used in sysfs
+ * @cdev: Character device structure in /dev
+ * @ec_dev: cros_ec_device structure to talk to the physical device
+ * @dev: pointer to the platform device
+ * @cmd_offset: offset to apply for each command.
+ */
+struct cros_ec_dev {
+ struct device class_dev;
+ struct cdev cdev;
+ struct cros_ec_device *ec_dev;
+ struct device *dev;
+ u16 cmd_offset;
+};
+
/**
* cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
*
@@ -198,4 +244,17 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
+/**
+ * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
+ *
+ * @ec_dev: Device to query
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
+
+/* sysfs stuff */
+extern struct attribute_group cros_ec_attr_group;
+extern struct attribute_group cros_ec_lightbar_attr_group;
+extern struct attribute_group cros_ec_vbc_attr_group;
+
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/kernel/include/linux/mfd/cros_ec_commands.h b/kernel/include/linux/mfd/cros_ec_commands.h
index a49cd41fe..13b630c10 100644
--- a/kernel/include/linux/mfd/cros_ec_commands.h
+++ b/kernel/include/linux/mfd/cros_ec_commands.h
@@ -515,7 +515,7 @@ struct ec_host_response {
/*
* Notes on commands:
*
- * Each command is an 8-byte command value. Commands which take params or
+ * Each command is a 16-bit command value. Commands which take params or
* return response data specify structs for that data. If no struct is
* specified, the command does not input or output data, respectively.
* Parameter/response length is implicit in the structs. Some underlying
@@ -966,7 +966,7 @@ struct rgb_s {
/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
-struct lightbar_params {
+struct lightbar_params_v0 {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1000,32 +1000,81 @@ struct lightbar_params {
struct rgb_s color[8]; /* 0-3 are Google colors */
} __packed;
+struct lightbar_params_v1 {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+ int32_t tap_tick_delay;
+ int32_t tap_display_time;
+
+ /* Tap-for-battery params */
+ uint8_t tap_pct_red;
+ uint8_t tap_pct_green;
+ uint8_t tap_seg_min_on;
+ uint8_t tap_seg_max_on;
+ uint8_t tap_seg_osc;
+ uint8_t tap_idx[3];
+
+ /* Oscillation */
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __packed;
+
struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
struct {
/* no args */
- } dump, off, on, init, get_seq, get_params, version;
+ } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
+ version, get_brightness, get_demo;
- struct num {
+ struct {
uint8_t num;
- } brightness, seq, demo;
+ } set_brightness, seq, demo;
- struct reg {
+ struct {
uint8_t ctrl, reg, value;
} reg;
- struct rgb {
+ struct {
uint8_t led, red, green, blue;
- } rgb;
+ } set_rgb;
+
+ struct {
+ uint8_t led;
+ } get_rgb;
- struct lightbar_params set_params;
+ struct lightbar_params_v0 set_params_v0;
+ struct lightbar_params_v1 set_params_v1;
};
} __packed;
struct ec_response_lightbar {
union {
- struct dump {
+ struct {
struct {
uint8_t reg;
uint8_t ic0;
@@ -1033,20 +1082,26 @@ struct ec_response_lightbar {
} vals[23];
} dump;
- struct get_seq {
+ struct {
uint8_t num;
- } get_seq;
+ } get_seq, get_brightness, get_demo;
- struct lightbar_params get_params;
+ struct lightbar_params_v0 get_params_v0;
+ struct lightbar_params_v1 get_params_v1;
- struct version {
+ struct {
uint32_t num;
uint32_t flags;
} version;
struct {
+ uint8_t red, green, blue;
+ } get_rgb;
+
+ struct {
/* no return params */
- } off, on, init, brightness, seq, reg, rgb, demo, set_params;
+ } off, on, init, set_brightness, seq, reg, set_rgb,
+ demo, set_params_v0, set_params_v1;
};
} __packed;
@@ -1056,15 +1111,20 @@ enum lightbar_command {
LIGHTBAR_CMD_OFF = 1,
LIGHTBAR_CMD_ON = 2,
LIGHTBAR_CMD_INIT = 3,
- LIGHTBAR_CMD_BRIGHTNESS = 4,
+ LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
LIGHTBAR_CMD_SEQ = 5,
LIGHTBAR_CMD_REG = 6,
- LIGHTBAR_CMD_RGB = 7,
+ LIGHTBAR_CMD_SET_RGB = 7,
LIGHTBAR_CMD_GET_SEQ = 8,
LIGHTBAR_CMD_DEMO = 9,
- LIGHTBAR_CMD_GET_PARAMS = 10,
- LIGHTBAR_CMD_SET_PARAMS = 11,
+ LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
+ LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
LIGHTBAR_CMD_VERSION = 12,
+ LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
+ LIGHTBAR_CMD_GET_RGB = 14,
+ LIGHTBAR_CMD_GET_DEMO = 15,
+ LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
+ LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
LIGHTBAR_NUM_CMDS
};
@@ -1421,8 +1481,40 @@ struct ec_response_rtc {
/*****************************************************************************/
/* Port80 log access */
+/* Maximum entries that can be read/written in a single command */
+#define EC_PORT80_SIZE_MAX 32
+
/* Get last port80 code from previous boot */
#define EC_CMD_PORT80_LAST_BOOT 0x48
+#define EC_CMD_PORT80_READ 0x48
+
+enum ec_port80_subcmd {
+ EC_PORT80_GET_INFO = 0,
+ EC_PORT80_READ_BUFFER,
+};
+
+struct ec_params_port80_read {
+ uint16_t subcmd;
+ union {
+ struct {
+ uint32_t offset;
+ uint32_t num_entries;
+ } read_buffer;
+ };
+} __packed;
+
+struct ec_response_port80_read {
+ union {
+ struct {
+ uint32_t writes;
+ uint32_t history_size;
+ uint32_t last_boot;
+ } get_info;
+ struct {
+ uint16_t codes[EC_PORT80_SIZE_MAX];
+ } data;
+ };
+} __packed;
struct ec_response_port80_last_boot {
uint16_t code;
@@ -1782,6 +1874,7 @@ struct ec_params_gpio_set {
/* Get GPIO value */
#define EC_CMD_GPIO_GET 0x93
+/* Version 0 of input params and response */
struct ec_params_gpio_get {
char name[32];
} __packed;
@@ -1789,6 +1882,38 @@ struct ec_response_gpio_get {
uint8_t val;
} __packed;
+/* Version 1 of input params and response */
+struct ec_params_gpio_get_v1 {
+ uint8_t subcmd;
+ union {
+ struct {
+ char name[32];
+ } get_value_by_name;
+ struct {
+ uint8_t index;
+ } get_info;
+ };
+} __packed;
+
+struct ec_response_gpio_get_v1 {
+ union {
+ struct {
+ uint8_t val;
+ } get_value_by_name, get_count;
+ struct {
+ uint8_t val;
+ char name[32];
+ uint32_t flags;
+ } get_info;
+ };
+} __packed;
+
+enum gpio_get_subcmd {
+ EC_GPIO_GET_BY_NAME = 0,
+ EC_GPIO_GET_COUNT = 1,
+ EC_GPIO_GET_INFO = 2,
+};
+
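A hedged sketch of filling the v1 request to fetch a GPIO value by name; the GPIO name is a placeholder and the transport handling is elided:

struct ec_params_gpio_get_v1 p = {
	.subcmd = EC_GPIO_GET_BY_NAME,
};

strlcpy(p.get_value_by_name.name, "SOME_GPIO_NAME",
	sizeof(p.get_value_by_name.name));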
/*****************************************************************************/
/* I2C commands. Only available when flash write protect is unlocked. */
@@ -1857,13 +1982,21 @@ struct ec_params_charge_control {
/*****************************************************************************/
/*
- * Cut off battery power output if the battery supports.
+ * Cut off battery power immediately or after the host has shut down.
*
- * For unsupported battery, just don't implement this command and lets EC
- * return EC_RES_INVALID_COMMAND.
+ * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery,
+ * EC_RES_SUCCESS if the command was successful, or
+ * EC_RES_ERROR if the cut off command failed.
*/
+
#define EC_CMD_BATTERY_CUT_OFF 0x99
+#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
+
+struct ec_params_battery_cutoff {
+ uint8_t flags;
+} __packed;
+
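For instance, a caller deferring the cutoff until the host has shut down would pass the flag above (a sketch; the transport is elided):

struct ec_params_battery_cutoff p = {
	.flags = EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN,
};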
/*****************************************************************************/
/* USB port mux control. */
@@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block {
} __packed;
/*****************************************************************************/
+/* Battery vendor parameters
+ *
+ * Get or set vendor-specific parameters in the battery. Implementations may
+ * differ between boards or batteries. On a set operation, the response
+ * contains the actual value set, which may be rounded or clipped from the
+ * requested value.
+ */
+
+#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
+
+enum ec_battery_vendor_param_mode {
+ BATTERY_VENDOR_PARAM_MODE_GET = 0,
+ BATTERY_VENDOR_PARAM_MODE_SET,
+};
+
+struct ec_params_battery_vendor_param {
+ uint32_t param;
+ uint32_t value;
+ uint8_t mode;
+} __packed;
+
+struct ec_response_battery_vendor_param {
+ uint32_t value;
+} __packed;
+
+/*****************************************************************************/
/* System commands */
/*
@@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec {
/*****************************************************************************/
/*
+ * PD commands
+ *
+ * These commands are for PD MCU communication.
+ */
+
+/* EC to PD MCU exchange status command */
+#define EC_CMD_PD_EXCHANGE_STATUS 0x100
+
+/* Status of EC being sent to PD */
+struct ec_params_pd_status {
+ int8_t batt_soc; /* battery state of charge */
+} __packed;
+
+/* Status of PD being sent back to EC */
+struct ec_response_pd_status {
+ int8_t status; /* PD MCU status */
+ uint32_t curr_lim_ma; /* input current limit */
+} __packed;
+
+/* Set USB type-C port role and muxes */
+#define EC_CMD_USB_PD_CONTROL 0x101
+
+enum usb_pd_control_role {
+ USB_PD_CTRL_ROLE_NO_CHANGE = 0,
+ USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
+ USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
+ USB_PD_CTRL_ROLE_FORCE_SINK = 3,
+ USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
+};
+
+enum usb_pd_control_mux {
+ USB_PD_CTRL_MUX_NO_CHANGE = 0,
+ USB_PD_CTRL_MUX_NONE = 1,
+ USB_PD_CTRL_MUX_USB = 2,
+ USB_PD_CTRL_MUX_DP = 3,
+ USB_PD_CTRL_MUX_DOCK = 4,
+ USB_PD_CTRL_MUX_AUTO = 5,
+};
+
+struct ec_params_usb_pd_control {
+ uint8_t port;
+ uint8_t role;
+ uint8_t mux;
+} __packed;
+
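An illustrative request that leaves the port role alone and lets the EC pick the mux:

struct ec_params_usb_pd_control p = {
	.port = 0,
	.role = USB_PD_CTRL_ROLE_NO_CHANGE,
	.mux = USB_PD_CTRL_MUX_AUTO,
};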
+/*****************************************************************************/
+/*
+ * Passthru commands
+ *
+ * Some platforms have sub-processors chained to each other. For example:
+ *
+ * AP <--> EC <--> PD MCU
+ *
+ * The top 2 bits of the command number are used to indicate which device the
+ * command is intended for. Device 0 is always the device receiving the
+ * command; other device mapping is board-specific.
+ *
+ * When a device receives a command to be passed to a sub-processor, it passes
+ * it on with the device number set back to 0. This allows the sub-processor
+ * to remain blissfully unaware of whether the command originated on the next
+ * device up the chain, or was passed through from the AP.
+ *
+ * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
+ * AP sends command 0x4002 to the EC
+ * EC sends command 0x0002 to the PD MCU
+ * EC forwards PD MCU response back to the AP
+ */
+
+/* Offset and max command number for sub-device n */
+#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
+#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
+
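Concretely, the worked example in the comment above falls out of these macros as:

/* AP -> EC: command 0x0002 aimed at sub-device 1 (the PD MCU above). */
uint16_t cmd = EC_CMD_PASSTHRU_OFFSET(1) + 0x0002;	/* 0x4000 + 0x0002 == 0x4002 */

The EC masks the device number back to 0 before forwarding, so the PD MCU sees plain 0x0002.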
+/*****************************************************************************/
+/*
* Deprecated constants. These constants have been renamed for clarity. The
* meaning and size has not changed. Programs that use the old names should
* switch to the new names soon, as the old names may not be carried forward
diff --git a/kernel/include/linux/mfd/da9052/reg.h b/kernel/include/linux/mfd/da9052/reg.h
index c4dd3a8ad..5010f9787 100644
--- a/kernel/include/linux/mfd/da9052/reg.h
+++ b/kernel/include/linux/mfd/da9052/reg.h
@@ -65,6 +65,9 @@
#define DA9052_GPIO_2_3_REG 22
#define DA9052_GPIO_4_5_REG 23
#define DA9052_GPIO_6_7_REG 24
+#define DA9052_GPIO_8_9_REG 25
+#define DA9052_GPIO_10_11_REG 26
+#define DA9052_GPIO_12_13_REG 27
#define DA9052_GPIO_14_15_REG 28
/* POWER SEQUENCER CONTROL REGISTERS */
diff --git a/kernel/include/linux/mfd/da9055/core.h b/kernel/include/linux/mfd/da9055/core.h
index 956afa445..5dc743fd6 100644
--- a/kernel/include/linux/mfd/da9055/core.h
+++ b/kernel/include/linux/mfd/da9055/core.h
@@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
int da9055_device_init(struct da9055 *da9055);
void da9055_device_exit(struct da9055 *da9055);
-extern struct regmap_config da9055_regmap_config;
+extern const struct regmap_config da9055_regmap_config;
#endif /* __DA9055_CORE_H */
diff --git a/kernel/include/linux/mfd/da9062/core.h b/kernel/include/linux/mfd/da9062/core.h
new file mode 100644
index 000000000..376ba8436
--- /dev/null
+++ b/kernel/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_DA9062_CORE_H__
+#define __MFD_DA9062_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9062/registers.h>
+
+/* Interrupts */
+enum da9062_irqs {
+ /* IRQ A */
+ DA9062_IRQ_ONKEY,
+ DA9062_IRQ_ALARM,
+ DA9062_IRQ_TICK,
+ DA9062_IRQ_WDG_WARN,
+ DA9062_IRQ_SEQ_RDY,
+ /* IRQ B*/
+ DA9062_IRQ_TEMP,
+ DA9062_IRQ_LDO_LIM,
+ DA9062_IRQ_DVC_RDY,
+ DA9062_IRQ_VDD_WARN,
+ /* IRQ C */
+ DA9062_IRQ_GPI0,
+ DA9062_IRQ_GPI1,
+ DA9062_IRQ_GPI2,
+ DA9062_IRQ_GPI3,
+ DA9062_IRQ_GPI4,
+
+ DA9062_NUM_IRQ,
+};
+
+struct da9062 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irq;
+};
+
+#endif /* __MFD_DA9062_CORE_H__ */
diff --git a/kernel/include/linux/mfd/da9062/registers.h b/kernel/include/linux/mfd/da9062/registers.h
new file mode 100644
index 000000000..97790d1b0
--- /dev/null
+++ b/kernel/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
+/*
+ * registers.h - Register definitions for the DA9062
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DA9062_H__
+#define __DA9062_H__
+
+#define DA9062_PMIC_DEVICE_ID 0x62
+#define DA9062_PMIC_VARIANT_MRC_AA 0x01
+
+#define DA9062_I2C_PAGE_SEL_SHIFT 1
+
+/*
+ * Registers
+ */
+
+#define DA9062AA_PAGE_CON 0x000
+#define DA9062AA_STATUS_A 0x001
+#define DA9062AA_STATUS_B 0x002
+#define DA9062AA_STATUS_D 0x004
+#define DA9062AA_FAULT_LOG 0x005
+#define DA9062AA_EVENT_A 0x006
+#define DA9062AA_EVENT_B 0x007
+#define DA9062AA_EVENT_C 0x008
+#define DA9062AA_IRQ_MASK_A 0x00A
+#define DA9062AA_IRQ_MASK_B 0x00B
+#define DA9062AA_IRQ_MASK_C 0x00C
+#define DA9062AA_CONTROL_A 0x00E
+#define DA9062AA_CONTROL_B 0x00F
+#define DA9062AA_CONTROL_C 0x010
+#define DA9062AA_CONTROL_D 0x011
+#define DA9062AA_CONTROL_E 0x012
+#define DA9062AA_CONTROL_F 0x013
+#define DA9062AA_PD_DIS 0x014
+#define DA9062AA_GPIO_0_1 0x015
+#define DA9062AA_GPIO_2_3 0x016
+#define DA9062AA_GPIO_4 0x017
+#define DA9062AA_GPIO_WKUP_MODE 0x01C
+#define DA9062AA_GPIO_MODE0_4 0x01D
+#define DA9062AA_GPIO_OUT0_2 0x01E
+#define DA9062AA_GPIO_OUT3_4 0x01F
+#define DA9062AA_BUCK2_CONT 0x020
+#define DA9062AA_BUCK1_CONT 0x021
+#define DA9062AA_BUCK4_CONT 0x022
+#define DA9062AA_BUCK3_CONT 0x024
+#define DA9062AA_LDO1_CONT 0x026
+#define DA9062AA_LDO2_CONT 0x027
+#define DA9062AA_LDO3_CONT 0x028
+#define DA9062AA_LDO4_CONT 0x029
+#define DA9062AA_DVC_1 0x032
+#define DA9062AA_COUNT_S 0x040
+#define DA9062AA_COUNT_MI 0x041
+#define DA9062AA_COUNT_H 0x042
+#define DA9062AA_COUNT_D 0x043
+#define DA9062AA_COUNT_MO 0x044
+#define DA9062AA_COUNT_Y 0x045
+#define DA9062AA_ALARM_S 0x046
+#define DA9062AA_ALARM_MI 0x047
+#define DA9062AA_ALARM_H 0x048
+#define DA9062AA_ALARM_D 0x049
+#define DA9062AA_ALARM_MO 0x04A
+#define DA9062AA_ALARM_Y 0x04B
+#define DA9062AA_SECOND_A 0x04C
+#define DA9062AA_SECOND_B 0x04D
+#define DA9062AA_SECOND_C 0x04E
+#define DA9062AA_SECOND_D 0x04F
+#define DA9062AA_SEQ 0x081
+#define DA9062AA_SEQ_TIMER 0x082
+#define DA9062AA_ID_2_1 0x083
+#define DA9062AA_ID_4_3 0x084
+#define DA9062AA_ID_12_11 0x088
+#define DA9062AA_ID_14_13 0x089
+#define DA9062AA_ID_16_15 0x08A
+#define DA9062AA_ID_22_21 0x08D
+#define DA9062AA_ID_24_23 0x08E
+#define DA9062AA_ID_26_25 0x08F
+#define DA9062AA_ID_28_27 0x090
+#define DA9062AA_ID_30_29 0x091
+#define DA9062AA_ID_32_31 0x092
+#define DA9062AA_SEQ_A 0x095
+#define DA9062AA_SEQ_B 0x096
+#define DA9062AA_WAIT 0x097
+#define DA9062AA_EN_32K 0x098
+#define DA9062AA_RESET 0x099
+#define DA9062AA_BUCK_ILIM_A 0x09A
+#define DA9062AA_BUCK_ILIM_B 0x09B
+#define DA9062AA_BUCK_ILIM_C 0x09C
+#define DA9062AA_BUCK2_CFG 0x09D
+#define DA9062AA_BUCK1_CFG 0x09E
+#define DA9062AA_BUCK4_CFG 0x09F
+#define DA9062AA_BUCK3_CFG 0x0A0
+#define DA9062AA_VBUCK2_A 0x0A3
+#define DA9062AA_VBUCK1_A 0x0A4
+#define DA9062AA_VBUCK4_A 0x0A5
+#define DA9062AA_VBUCK3_A 0x0A7
+#define DA9062AA_VLDO1_A 0x0A9
+#define DA9062AA_VLDO2_A 0x0AA
+#define DA9062AA_VLDO3_A 0x0AB
+#define DA9062AA_VLDO4_A 0x0AC
+#define DA9062AA_VBUCK2_B 0x0B4
+#define DA9062AA_VBUCK1_B 0x0B5
+#define DA9062AA_VBUCK4_B 0x0B6
+#define DA9062AA_VBUCK3_B 0x0B8
+#define DA9062AA_VLDO1_B 0x0BA
+#define DA9062AA_VLDO2_B 0x0BB
+#define DA9062AA_VLDO3_B 0x0BC
+#define DA9062AA_VLDO4_B 0x0BD
+#define DA9062AA_BBAT_CONT 0x0C5
+#define DA9062AA_INTERFACE 0x105
+#define DA9062AA_CONFIG_A 0x106
+#define DA9062AA_CONFIG_B 0x107
+#define DA9062AA_CONFIG_C 0x108
+#define DA9062AA_CONFIG_D 0x109
+#define DA9062AA_CONFIG_E 0x10A
+#define DA9062AA_CONFIG_G 0x10C
+#define DA9062AA_CONFIG_H 0x10D
+#define DA9062AA_CONFIG_I 0x10E
+#define DA9062AA_CONFIG_J 0x10F
+#define DA9062AA_CONFIG_K 0x110
+#define DA9062AA_CONFIG_M 0x112
+#define DA9062AA_TRIM_CLDR 0x120
+#define DA9062AA_GP_ID_0 0x121
+#define DA9062AA_GP_ID_1 0x122
+#define DA9062AA_GP_ID_2 0x123
+#define DA9062AA_GP_ID_3 0x124
+#define DA9062AA_GP_ID_4 0x125
+#define DA9062AA_GP_ID_5 0x126
+#define DA9062AA_GP_ID_6 0x127
+#define DA9062AA_GP_ID_7 0x128
+#define DA9062AA_GP_ID_8 0x129
+#define DA9062AA_GP_ID_9 0x12A
+#define DA9062AA_GP_ID_10 0x12B
+#define DA9062AA_GP_ID_11 0x12C
+#define DA9062AA_GP_ID_12 0x12D
+#define DA9062AA_GP_ID_13 0x12E
+#define DA9062AA_GP_ID_14 0x12F
+#define DA9062AA_GP_ID_15 0x130
+#define DA9062AA_GP_ID_16 0x131
+#define DA9062AA_GP_ID_17 0x132
+#define DA9062AA_GP_ID_18 0x133
+#define DA9062AA_GP_ID_19 0x134
+#define DA9062AA_DEVICE_ID 0x181
+#define DA9062AA_VARIANT_ID 0x182
+#define DA9062AA_CUSTOMER_ID 0x183
+#define DA9062AA_CONFIG_ID 0x184
+
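The ID registers at the end of the map allow a probe-time sanity check against the device ID value defined above. A minimal sketch, assuming the I2C paging is already handled by the regmap configuration (the function name is illustrative):

static int da9062_check_device_id(struct regmap *regmap)
{
	unsigned int id;
	int ret;

	ret = regmap_read(regmap, DA9062AA_DEVICE_ID, &id);
	if (ret)
		return ret;

	return (id == DA9062_PMIC_DEVICE_ID) ? 0 : -ENODEV;
}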
+/*
+ * Bit fields
+ */
+
+/* DA9062AA_PAGE_CON = 0x000 */
+#define DA9062AA_PAGE_SHIFT 0
+#define DA9062AA_PAGE_MASK 0x3f
+#define DA9062AA_WRITE_MODE_SHIFT 6
+#define DA9062AA_WRITE_MODE_MASK BIT(6)
+#define DA9062AA_REVERT_SHIFT 7
+#define DA9062AA_REVERT_MASK BIT(7)
+
+/* DA9062AA_STATUS_A = 0x001 */
+#define DA9062AA_NONKEY_SHIFT 0
+#define DA9062AA_NONKEY_MASK 0x01
+#define DA9062AA_DVC_BUSY_SHIFT 2
+#define DA9062AA_DVC_BUSY_MASK BIT(2)
+
+/* DA9062AA_STATUS_B = 0x002 */
+#define DA9062AA_GPI0_SHIFT 0
+#define DA9062AA_GPI0_MASK 0x01
+#define DA9062AA_GPI1_SHIFT 1
+#define DA9062AA_GPI1_MASK BIT(1)
+#define DA9062AA_GPI2_SHIFT 2
+#define DA9062AA_GPI2_MASK BIT(2)
+#define DA9062AA_GPI3_SHIFT 3
+#define DA9062AA_GPI3_MASK BIT(3)
+#define DA9062AA_GPI4_SHIFT 4
+#define DA9062AA_GPI4_MASK BIT(4)
+
+/* DA9062AA_STATUS_D = 0x004 */
+#define DA9062AA_LDO1_ILIM_SHIFT 0
+#define DA9062AA_LDO1_ILIM_MASK 0x01
+#define DA9062AA_LDO2_ILIM_SHIFT 1
+#define DA9062AA_LDO2_ILIM_MASK BIT(1)
+#define DA9062AA_LDO3_ILIM_SHIFT 2
+#define DA9062AA_LDO3_ILIM_MASK BIT(2)
+#define DA9062AA_LDO4_ILIM_SHIFT 3
+#define DA9062AA_LDO4_ILIM_MASK BIT(3)
+
+/* DA9062AA_FAULT_LOG = 0x005 */
+#define DA9062AA_TWD_ERROR_SHIFT 0
+#define DA9062AA_TWD_ERROR_MASK 0x01
+#define DA9062AA_POR_SHIFT 1
+#define DA9062AA_POR_MASK BIT(1)
+#define DA9062AA_VDD_FAULT_SHIFT 2
+#define DA9062AA_VDD_FAULT_MASK BIT(2)
+#define DA9062AA_VDD_START_SHIFT 3
+#define DA9062AA_VDD_START_MASK BIT(3)
+#define DA9062AA_TEMP_CRIT_SHIFT 4
+#define DA9062AA_TEMP_CRIT_MASK BIT(4)
+#define DA9062AA_KEY_RESET_SHIFT 5
+#define DA9062AA_KEY_RESET_MASK BIT(5)
+#define DA9062AA_NSHUTDOWN_SHIFT 6
+#define DA9062AA_NSHUTDOWN_MASK BIT(6)
+#define DA9062AA_WAIT_SHUT_SHIFT 7
+#define DA9062AA_WAIT_SHUT_MASK BIT(7)
+
+/* DA9062AA_EVENT_A = 0x006 */
+#define DA9062AA_E_NONKEY_SHIFT 0
+#define DA9062AA_E_NONKEY_MASK 0x01
+#define DA9062AA_E_ALARM_SHIFT 1
+#define DA9062AA_E_ALARM_MASK BIT(1)
+#define DA9062AA_E_TICK_SHIFT 2
+#define DA9062AA_E_TICK_MASK BIT(2)
+#define DA9062AA_E_WDG_WARN_SHIFT 3
+#define DA9062AA_E_WDG_WARN_MASK BIT(3)
+#define DA9062AA_E_SEQ_RDY_SHIFT 4
+#define DA9062AA_E_SEQ_RDY_MASK BIT(4)
+#define DA9062AA_EVENTS_B_SHIFT 5
+#define DA9062AA_EVENTS_B_MASK BIT(5)
+#define DA9062AA_EVENTS_C_SHIFT 6
+#define DA9062AA_EVENTS_C_MASK BIT(6)
+
+/* DA9062AA_EVENT_B = 0x007 */
+#define DA9062AA_E_TEMP_SHIFT 1
+#define DA9062AA_E_TEMP_MASK BIT(1)
+#define DA9062AA_E_LDO_LIM_SHIFT 3
+#define DA9062AA_E_LDO_LIM_MASK BIT(3)
+#define DA9062AA_E_DVC_RDY_SHIFT 5
+#define DA9062AA_E_DVC_RDY_MASK BIT(5)
+#define DA9062AA_E_VDD_WARN_SHIFT 7
+#define DA9062AA_E_VDD_WARN_MASK BIT(7)
+
+/* DA9062AA_EVENT_C = 0x008 */
+#define DA9062AA_E_GPI0_SHIFT 0
+#define DA9062AA_E_GPI0_MASK 0x01
+#define DA9062AA_E_GPI1_SHIFT 1
+#define DA9062AA_E_GPI1_MASK BIT(1)
+#define DA9062AA_E_GPI2_SHIFT 2
+#define DA9062AA_E_GPI2_MASK BIT(2)
+#define DA9062AA_E_GPI3_SHIFT 3
+#define DA9062AA_E_GPI3_MASK BIT(3)
+#define DA9062AA_E_GPI4_SHIFT 4
+#define DA9062AA_E_GPI4_MASK BIT(4)
+
+/* DA9062AA_IRQ_MASK_A = 0x00A */
+#define DA9062AA_M_NONKEY_SHIFT 0
+#define DA9062AA_M_NONKEY_MASK 0x01
+#define DA9062AA_M_ALARM_SHIFT 1
+#define DA9062AA_M_ALARM_MASK BIT(1)
+#define DA9062AA_M_TICK_SHIFT 2
+#define DA9062AA_M_TICK_MASK BIT(2)
+#define DA9062AA_M_WDG_WARN_SHIFT 3
+#define DA9062AA_M_WDG_WARN_MASK BIT(3)
+#define DA9062AA_M_SEQ_RDY_SHIFT 4
+#define DA9062AA_M_SEQ_RDY_MASK BIT(4)
+
+/* DA9062AA_IRQ_MASK_B = 0x00B */
+#define DA9062AA_M_TEMP_SHIFT 1
+#define DA9062AA_M_TEMP_MASK BIT(1)
+#define DA9062AA_M_LDO_LIM_SHIFT 3
+#define DA9062AA_M_LDO_LIM_MASK BIT(3)
+#define DA9062AA_M_DVC_RDY_SHIFT 5
+#define DA9062AA_M_DVC_RDY_MASK BIT(5)
+#define DA9062AA_M_VDD_WARN_SHIFT 7
+#define DA9062AA_M_VDD_WARN_MASK BIT(7)
+
+/* DA9062AA_IRQ_MASK_C = 0x00C */
+#define DA9062AA_M_GPI0_SHIFT 0
+#define DA9062AA_M_GPI0_MASK 0x01
+#define DA9062AA_M_GPI1_SHIFT 1
+#define DA9062AA_M_GPI1_MASK BIT(1)
+#define DA9062AA_M_GPI2_SHIFT 2
+#define DA9062AA_M_GPI2_MASK BIT(2)
+#define DA9062AA_M_GPI3_SHIFT 3
+#define DA9062AA_M_GPI3_MASK BIT(3)
+#define DA9062AA_M_GPI4_SHIFT 4
+#define DA9062AA_M_GPI4_MASK BIT(4)
+
+/* DA9062AA_CONTROL_A = 0x00E */
+#define DA9062AA_SYSTEM_EN_SHIFT 0
+#define DA9062AA_SYSTEM_EN_MASK 0x01
+#define DA9062AA_POWER_EN_SHIFT 1
+#define DA9062AA_POWER_EN_MASK BIT(1)
+#define DA9062AA_POWER1_EN_SHIFT 2
+#define DA9062AA_POWER1_EN_MASK BIT(2)
+#define DA9062AA_STANDBY_SHIFT 3
+#define DA9062AA_STANDBY_MASK BIT(3)
+#define DA9062AA_M_SYSTEM_EN_SHIFT 4
+#define DA9062AA_M_SYSTEM_EN_MASK BIT(4)
+#define DA9062AA_M_POWER_EN_SHIFT 5
+#define DA9062AA_M_POWER_EN_MASK BIT(5)
+#define DA9062AA_M_POWER1_EN_SHIFT 6
+#define DA9062AA_M_POWER1_EN_MASK BIT(6)
+
+/* DA9062AA_CONTROL_B = 0x00F */
+#define DA9062AA_WATCHDOG_PD_SHIFT 1
+#define DA9062AA_WATCHDOG_PD_MASK BIT(1)
+#define DA9062AA_FREEZE_EN_SHIFT 2
+#define DA9062AA_FREEZE_EN_MASK BIT(2)
+#define DA9062AA_NRES_MODE_SHIFT 3
+#define DA9062AA_NRES_MODE_MASK BIT(3)
+#define DA9062AA_NONKEY_LOCK_SHIFT 4
+#define DA9062AA_NONKEY_LOCK_MASK BIT(4)
+#define DA9062AA_NFREEZE_SHIFT 5
+#define DA9062AA_NFREEZE_MASK (0x03 << 5)
+#define DA9062AA_BUCK_SLOWSTART_SHIFT 7
+#define DA9062AA_BUCK_SLOWSTART_MASK BIT(7)
+
+/* DA9062AA_CONTROL_C = 0x010 */
+#define DA9062AA_DEBOUNCING_SHIFT 0
+#define DA9062AA_DEBOUNCING_MASK 0x07
+#define DA9062AA_AUTO_BOOT_SHIFT 3
+#define DA9062AA_AUTO_BOOT_MASK BIT(3)
+#define DA9062AA_OTPREAD_EN_SHIFT 4
+#define DA9062AA_OTPREAD_EN_MASK BIT(4)
+#define DA9062AA_SLEW_RATE_SHIFT 5
+#define DA9062AA_SLEW_RATE_MASK (0x03 << 5)
+#define DA9062AA_DEF_SUPPLY_SHIFT 7
+#define DA9062AA_DEF_SUPPLY_MASK BIT(7)
+
+/* DA9062AA_CONTROL_D = 0x011 */
+#define DA9062AA_TWDSCALE_SHIFT 0
+#define DA9062AA_TWDSCALE_MASK 0x07
+
+/* DA9062AA_CONTROL_E = 0x012 */
+#define DA9062AA_RTC_MODE_PD_SHIFT 0
+#define DA9062AA_RTC_MODE_PD_MASK 0x01
+#define DA9062AA_RTC_MODE_SD_SHIFT 1
+#define DA9062AA_RTC_MODE_SD_MASK BIT(1)
+#define DA9062AA_RTC_EN_SHIFT 2
+#define DA9062AA_RTC_EN_MASK BIT(2)
+#define DA9062AA_V_LOCK_SHIFT 7
+#define DA9062AA_V_LOCK_MASK BIT(7)
+
+/* DA9062AA_CONTROL_F = 0x013 */
+#define DA9062AA_WATCHDOG_SHIFT 0
+#define DA9062AA_WATCHDOG_MASK 0x01
+#define DA9062AA_SHUTDOWN_SHIFT 1
+#define DA9062AA_SHUTDOWN_MASK BIT(1)
+#define DA9062AA_WAKE_UP_SHIFT 2
+#define DA9062AA_WAKE_UP_MASK BIT(2)
+
+/* DA9062AA_PD_DIS = 0x014 */
+#define DA9062AA_GPI_DIS_SHIFT 0
+#define DA9062AA_GPI_DIS_MASK 0x01
+#define DA9062AA_PMIF_DIS_SHIFT 2
+#define DA9062AA_PMIF_DIS_MASK BIT(2)
+#define DA9062AA_CLDR_PAUSE_SHIFT 4
+#define DA9062AA_CLDR_PAUSE_MASK BIT(4)
+#define DA9062AA_BBAT_DIS_SHIFT 5
+#define DA9062AA_BBAT_DIS_MASK BIT(5)
+#define DA9062AA_OUT32K_PAUSE_SHIFT 6
+#define DA9062AA_OUT32K_PAUSE_MASK BIT(6)
+#define DA9062AA_PMCONT_DIS_SHIFT 7
+#define DA9062AA_PMCONT_DIS_MASK BIT(7)
+
+/* DA9062AA_GPIO_0_1 = 0x015 */
+#define DA9062AA_GPIO0_PIN_SHIFT 0
+#define DA9062AA_GPIO0_PIN_MASK 0x03
+#define DA9062AA_GPIO0_TYPE_SHIFT 2
+#define DA9062AA_GPIO0_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO0_WEN_SHIFT 3
+#define DA9062AA_GPIO0_WEN_MASK BIT(3)
+#define DA9062AA_GPIO1_PIN_SHIFT 4
+#define DA9062AA_GPIO1_PIN_MASK (0x03 << 4)
+#define DA9062AA_GPIO1_TYPE_SHIFT 6
+#define DA9062AA_GPIO1_TYPE_MASK BIT(6)
+#define DA9062AA_GPIO1_WEN_SHIFT 7
+#define DA9062AA_GPIO1_WEN_MASK BIT(7)
+
+/* DA9062AA_GPIO_2_3 = 0x016 */
+#define DA9062AA_GPIO2_PIN_SHIFT 0
+#define DA9062AA_GPIO2_PIN_MASK 0x03
+#define DA9062AA_GPIO2_TYPE_SHIFT 2
+#define DA9062AA_GPIO2_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO2_WEN_SHIFT 3
+#define DA9062AA_GPIO2_WEN_MASK BIT(3)
+#define DA9062AA_GPIO3_PIN_SHIFT 4
+#define DA9062AA_GPIO3_PIN_MASK (0x03 << 4)
+#define DA9062AA_GPIO3_TYPE_SHIFT 6
+#define DA9062AA_GPIO3_TYPE_MASK BIT(6)
+#define DA9062AA_GPIO3_WEN_SHIFT 7
+#define DA9062AA_GPIO3_WEN_MASK BIT(7)
+
+/* DA9062AA_GPIO_4 = 0x017 */
+#define DA9062AA_GPIO4_PIN_SHIFT 0
+#define DA9062AA_GPIO4_PIN_MASK 0x03
+#define DA9062AA_GPIO4_TYPE_SHIFT 2
+#define DA9062AA_GPIO4_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO4_WEN_SHIFT 3
+#define DA9062AA_GPIO4_WEN_MASK BIT(3)
+
+/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
+#define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0
+#define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01
+#define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1
+#define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1)
+#define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2
+#define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2)
+#define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3
+#define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3)
+#define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4
+#define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4)
+
+/* DA9062AA_GPIO_MODE0_4 = 0x01D */
+#define DA9062AA_GPIO0_MODE_SHIFT 0
+#define DA9062AA_GPIO0_MODE_MASK 0x01
+#define DA9062AA_GPIO1_MODE_SHIFT 1
+#define DA9062AA_GPIO1_MODE_MASK BIT(1)
+#define DA9062AA_GPIO2_MODE_SHIFT 2
+#define DA9062AA_GPIO2_MODE_MASK BIT(2)
+#define DA9062AA_GPIO3_MODE_SHIFT 3
+#define DA9062AA_GPIO3_MODE_MASK BIT(3)
+#define DA9062AA_GPIO4_MODE_SHIFT 4
+#define DA9062AA_GPIO4_MODE_MASK BIT(4)
+
+/* DA9062AA_GPIO_OUT0_2 = 0x01E */
+#define DA9062AA_GPIO0_OUT_SHIFT 0
+#define DA9062AA_GPIO0_OUT_MASK 0x07
+#define DA9062AA_GPIO1_OUT_SHIFT 3
+#define DA9062AA_GPIO1_OUT_MASK (0x07 << 3)
+#define DA9062AA_GPIO2_OUT_SHIFT 6
+#define DA9062AA_GPIO2_OUT_MASK (0x03 << 6)
+
+/* DA9062AA_GPIO_OUT3_4 = 0x01F */
+#define DA9062AA_GPIO3_OUT_SHIFT 0
+#define DA9062AA_GPIO3_OUT_MASK 0x07
+#define DA9062AA_GPIO4_OUT_SHIFT 3
+#define DA9062AA_GPIO4_OUT_MASK (0x03 << 3)
+
+/* DA9062AA_BUCK2_CONT = 0x020 */
+#define DA9062AA_BUCK2_EN_SHIFT 0
+#define DA9062AA_BUCK2_EN_MASK 0x01
+#define DA9062AA_BUCK2_GPI_SHIFT 1
+#define DA9062AA_BUCK2_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK2_CONF_SHIFT 3
+#define DA9062AA_BUCK2_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK2_GPI_SHIFT 5
+#define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK1_CONT = 0x021 */
+#define DA9062AA_BUCK1_EN_SHIFT 0
+#define DA9062AA_BUCK1_EN_MASK 0x01
+#define DA9062AA_BUCK1_GPI_SHIFT 1
+#define DA9062AA_BUCK1_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK1_CONF_SHIFT 3
+#define DA9062AA_BUCK1_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK1_GPI_SHIFT 5
+#define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK4_CONT = 0x022 */
+#define DA9062AA_BUCK4_EN_SHIFT 0
+#define DA9062AA_BUCK4_EN_MASK 0x01
+#define DA9062AA_BUCK4_GPI_SHIFT 1
+#define DA9062AA_BUCK4_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK4_CONF_SHIFT 3
+#define DA9062AA_BUCK4_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK4_GPI_SHIFT 5
+#define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK3_CONT = 0x024 */
+#define DA9062AA_BUCK3_EN_SHIFT 0
+#define DA9062AA_BUCK3_EN_MASK 0x01
+#define DA9062AA_BUCK3_GPI_SHIFT 1
+#define DA9062AA_BUCK3_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK3_CONF_SHIFT 3
+#define DA9062AA_BUCK3_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK3_GPI_SHIFT 5
+#define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_LDO1_CONT = 0x026 */
+#define DA9062AA_LDO1_EN_SHIFT 0
+#define DA9062AA_LDO1_EN_MASK 0x01
+#define DA9062AA_LDO1_GPI_SHIFT 1
+#define DA9062AA_LDO1_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO1_PD_DIS_SHIFT 3
+#define DA9062AA_LDO1_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO1_GPI_SHIFT 5
+#define DA9062AA_VLDO1_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO1_CONF_SHIFT 7
+#define DA9062AA_LDO1_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO2_CONT = 0x027 */
+#define DA9062AA_LDO2_EN_SHIFT 0
+#define DA9062AA_LDO2_EN_MASK 0x01
+#define DA9062AA_LDO2_GPI_SHIFT 1
+#define DA9062AA_LDO2_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO2_PD_DIS_SHIFT 3
+#define DA9062AA_LDO2_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO2_GPI_SHIFT 5
+#define DA9062AA_VLDO2_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO2_CONF_SHIFT 7
+#define DA9062AA_LDO2_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO3_CONT = 0x028 */
+#define DA9062AA_LDO3_EN_SHIFT 0
+#define DA9062AA_LDO3_EN_MASK 0x01
+#define DA9062AA_LDO3_GPI_SHIFT 1
+#define DA9062AA_LDO3_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO3_PD_DIS_SHIFT 3
+#define DA9062AA_LDO3_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO3_GPI_SHIFT 5
+#define DA9062AA_VLDO3_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO3_CONF_SHIFT 7
+#define DA9062AA_LDO3_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO4_CONT = 0x029 */
+#define DA9062AA_LDO4_EN_SHIFT 0
+#define DA9062AA_LDO4_EN_MASK 0x01
+#define DA9062AA_LDO4_GPI_SHIFT 1
+#define DA9062AA_LDO4_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO4_PD_DIS_SHIFT 3
+#define DA9062AA_LDO4_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO4_GPI_SHIFT 5
+#define DA9062AA_VLDO4_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO4_CONF_SHIFT 7
+#define DA9062AA_LDO4_CONF_MASK BIT(7)
+
+/* DA9062AA_DVC_1 = 0x032 */
+#define DA9062AA_VBUCK1_SEL_SHIFT 0
+#define DA9062AA_VBUCK1_SEL_MASK 0x01
+#define DA9062AA_VBUCK2_SEL_SHIFT 1
+#define DA9062AA_VBUCK2_SEL_MASK BIT(1)
+#define DA9062AA_VBUCK4_SEL_SHIFT 2
+#define DA9062AA_VBUCK4_SEL_MASK BIT(2)
+#define DA9062AA_VBUCK3_SEL_SHIFT 3
+#define DA9062AA_VBUCK3_SEL_MASK BIT(3)
+#define DA9062AA_VLDO1_SEL_SHIFT 4
+#define DA9062AA_VLDO1_SEL_MASK BIT(4)
+#define DA9062AA_VLDO2_SEL_SHIFT 5
+#define DA9062AA_VLDO2_SEL_MASK BIT(5)
+#define DA9062AA_VLDO3_SEL_SHIFT 6
+#define DA9062AA_VLDO3_SEL_MASK BIT(6)
+#define DA9062AA_VLDO4_SEL_SHIFT 7
+#define DA9062AA_VLDO4_SEL_MASK BIT(7)
+
+/* DA9062AA_COUNT_S = 0x040 */
+#define DA9062AA_COUNT_SEC_SHIFT 0
+#define DA9062AA_COUNT_SEC_MASK 0x3f
+#define DA9062AA_RTC_READ_SHIFT 7
+#define DA9062AA_RTC_READ_MASK BIT(7)
+
+/* DA9062AA_COUNT_MI = 0x041 */
+#define DA9062AA_COUNT_MIN_SHIFT 0
+#define DA9062AA_COUNT_MIN_MASK 0x3f
+
+/* DA9062AA_COUNT_H = 0x042 */
+#define DA9062AA_COUNT_HOUR_SHIFT 0
+#define DA9062AA_COUNT_HOUR_MASK 0x1f
+
+/* DA9062AA_COUNT_D = 0x043 */
+#define DA9062AA_COUNT_DAY_SHIFT 0
+#define DA9062AA_COUNT_DAY_MASK 0x1f
+
+/* DA9062AA_COUNT_MO = 0x044 */
+#define DA9062AA_COUNT_MONTH_SHIFT 0
+#define DA9062AA_COUNT_MONTH_MASK 0x0f
+
+/* DA9062AA_COUNT_Y = 0x045 */
+#define DA9062AA_COUNT_YEAR_SHIFT 0
+#define DA9062AA_COUNT_YEAR_MASK 0x3f
+#define DA9062AA_MONITOR_SHIFT 6
+#define DA9062AA_MONITOR_MASK BIT(6)
+
+/* DA9062AA_ALARM_S = 0x046 */
+#define DA9062AA_ALARM_SEC_SHIFT 0
+#define DA9062AA_ALARM_SEC_MASK 0x3f
+#define DA9062AA_ALARM_STATUS_SHIFT 6
+#define DA9062AA_ALARM_STATUS_MASK (0x03 << 6)
+
+/* DA9062AA_ALARM_MI = 0x047 */
+#define DA9062AA_ALARM_MIN_SHIFT 0
+#define DA9062AA_ALARM_MIN_MASK 0x3f
+
+/* DA9062AA_ALARM_H = 0x048 */
+#define DA9062AA_ALARM_HOUR_SHIFT 0
+#define DA9062AA_ALARM_HOUR_MASK 0x1f
+
+/* DA9062AA_ALARM_D = 0x049 */
+#define DA9062AA_ALARM_DAY_SHIFT 0
+#define DA9062AA_ALARM_DAY_MASK 0x1f
+
+/* DA9062AA_ALARM_MO = 0x04A */
+#define DA9062AA_ALARM_MONTH_SHIFT 0
+#define DA9062AA_ALARM_MONTH_MASK 0x0f
+#define DA9062AA_TICK_TYPE_SHIFT 4
+#define DA9062AA_TICK_TYPE_MASK BIT(4)
+#define DA9062AA_TICK_WAKE_SHIFT 5
+#define DA9062AA_TICK_WAKE_MASK BIT(5)
+
+/* DA9062AA_ALARM_Y = 0x04B */
+#define DA9062AA_ALARM_YEAR_SHIFT 0
+#define DA9062AA_ALARM_YEAR_MASK 0x3f
+#define DA9062AA_ALARM_ON_SHIFT 6
+#define DA9062AA_ALARM_ON_MASK BIT(6)
+#define DA9062AA_TICK_ON_SHIFT 7
+#define DA9062AA_TICK_ON_MASK BIT(7)
+
+/* DA9062AA_SECOND_A = 0x04C */
+#define DA9062AA_SECONDS_A_SHIFT 0
+#define DA9062AA_SECONDS_A_MASK 0xff
+
+/* DA9062AA_SECOND_B = 0x04D */
+#define DA9062AA_SECONDS_B_SHIFT 0
+#define DA9062AA_SECONDS_B_MASK 0xff
+
+/* DA9062AA_SECOND_C = 0x04E */
+#define DA9062AA_SECONDS_C_SHIFT 0
+#define DA9062AA_SECONDS_C_MASK 0xff
+
+/* DA9062AA_SECOND_D = 0x04F */
+#define DA9062AA_SECONDS_D_SHIFT 0
+#define DA9062AA_SECONDS_D_MASK 0xff
+
+/* DA9062AA_SEQ = 0x081 */
+#define DA9062AA_SEQ_POINTER_SHIFT 0
+#define DA9062AA_SEQ_POINTER_MASK 0x0f
+#define DA9062AA_NXT_SEQ_START_SHIFT 4
+#define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_TIMER = 0x082 */
+#define DA9062AA_SEQ_TIME_SHIFT 0
+#define DA9062AA_SEQ_TIME_MASK 0x0f
+#define DA9062AA_SEQ_DUMMY_SHIFT 4
+#define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4)
+
+/* DA9062AA_ID_2_1 = 0x083 */
+#define DA9062AA_LDO1_STEP_SHIFT 0
+#define DA9062AA_LDO1_STEP_MASK 0x0f
+#define DA9062AA_LDO2_STEP_SHIFT 4
+#define DA9062AA_LDO2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_4_3 = 0x084 */
+#define DA9062AA_LDO3_STEP_SHIFT 0
+#define DA9062AA_LDO3_STEP_MASK 0x0f
+#define DA9062AA_LDO4_STEP_SHIFT 4
+#define DA9062AA_LDO4_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_12_11 = 0x088 */
+#define DA9062AA_PD_DIS_STEP_SHIFT 4
+#define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_14_13 = 0x089 */
+#define DA9062AA_BUCK1_STEP_SHIFT 0
+#define DA9062AA_BUCK1_STEP_MASK 0x0f
+#define DA9062AA_BUCK2_STEP_SHIFT 4
+#define DA9062AA_BUCK2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_16_15 = 0x08A */
+#define DA9062AA_BUCK4_STEP_SHIFT 0
+#define DA9062AA_BUCK4_STEP_MASK 0x0f
+#define DA9062AA_BUCK3_STEP_SHIFT 4
+#define DA9062AA_BUCK3_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_22_21 = 0x08D */
+#define DA9062AA_GP_RISE1_STEP_SHIFT 0
+#define DA9062AA_GP_RISE1_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL1_STEP_SHIFT 4
+#define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_24_23 = 0x08E */
+#define DA9062AA_GP_RISE2_STEP_SHIFT 0
+#define DA9062AA_GP_RISE2_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL2_STEP_SHIFT 4
+#define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_26_25 = 0x08F */
+#define DA9062AA_GP_RISE3_STEP_SHIFT 0
+#define DA9062AA_GP_RISE3_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL3_STEP_SHIFT 4
+#define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_28_27 = 0x090 */
+#define DA9062AA_GP_RISE4_STEP_SHIFT 0
+#define DA9062AA_GP_RISE4_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL4_STEP_SHIFT 4
+#define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_30_29 = 0x091 */
+#define DA9062AA_GP_RISE5_STEP_SHIFT 0
+#define DA9062AA_GP_RISE5_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL5_STEP_SHIFT 4
+#define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_32_31 = 0x092 */
+#define DA9062AA_WAIT_STEP_SHIFT 0
+#define DA9062AA_WAIT_STEP_MASK 0x0f
+#define DA9062AA_EN32K_STEP_SHIFT 4
+#define DA9062AA_EN32K_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_A = 0x095 */
+#define DA9062AA_SYSTEM_END_SHIFT 0
+#define DA9062AA_SYSTEM_END_MASK 0x0f
+#define DA9062AA_POWER_END_SHIFT 4
+#define DA9062AA_POWER_END_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_B = 0x096 */
+#define DA9062AA_MAX_COUNT_SHIFT 0
+#define DA9062AA_MAX_COUNT_MASK 0x0f
+#define DA9062AA_PART_DOWN_SHIFT 4
+#define DA9062AA_PART_DOWN_MASK (0x0f << 4)
+
+/* DA9062AA_WAIT = 0x097 */
+#define DA9062AA_WAIT_TIME_SHIFT 0
+#define DA9062AA_WAIT_TIME_MASK 0x0f
+#define DA9062AA_WAIT_MODE_SHIFT 4
+#define DA9062AA_WAIT_MODE_MASK BIT(4)
+#define DA9062AA_TIME_OUT_SHIFT 5
+#define DA9062AA_TIME_OUT_MASK BIT(5)
+#define DA9062AA_WAIT_DIR_SHIFT 6
+#define DA9062AA_WAIT_DIR_MASK (0x03 << 6)
+
+/* DA9062AA_EN_32K = 0x098 */
+#define DA9062AA_STABILISATION_TIME_SHIFT 0
+#define DA9062AA_STABILISATION_TIME_MASK 0x07
+#define DA9062AA_CRYSTAL_SHIFT 3
+#define DA9062AA_CRYSTAL_MASK BIT(3)
+#define DA9062AA_DELAY_MODE_SHIFT 4
+#define DA9062AA_DELAY_MODE_MASK BIT(4)
+#define DA9062AA_OUT_CLOCK_SHIFT 5
+#define DA9062AA_OUT_CLOCK_MASK BIT(5)
+#define DA9062AA_RTC_CLOCK_SHIFT 6
+#define DA9062AA_RTC_CLOCK_MASK BIT(6)
+#define DA9062AA_EN_32KOUT_SHIFT 7
+#define DA9062AA_EN_32KOUT_MASK BIT(7)
+
+/* DA9062AA_RESET = 0x099 */
+#define DA9062AA_RESET_TIMER_SHIFT 0
+#define DA9062AA_RESET_TIMER_MASK 0x3f
+#define DA9062AA_RESET_EVENT_SHIFT 6
+#define DA9062AA_RESET_EVENT_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK_ILIM_A = 0x09A */
+#define DA9062AA_BUCK3_ILIM_SHIFT 0
+#define DA9062AA_BUCK3_ILIM_MASK 0x0f
+
+/* DA9062AA_BUCK_ILIM_B = 0x09B */
+#define DA9062AA_BUCK4_ILIM_SHIFT 0
+#define DA9062AA_BUCK4_ILIM_MASK 0x0f
+
+/* DA9062AA_BUCK_ILIM_C = 0x09C */
+#define DA9062AA_BUCK1_ILIM_SHIFT 0
+#define DA9062AA_BUCK1_ILIM_MASK 0x0f
+#define DA9062AA_BUCK2_ILIM_SHIFT 4
+#define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4)
+
+/* DA9062AA_BUCK2_CFG = 0x09D */
+#define DA9062AA_BUCK2_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK2_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK2_MODE_SHIFT 6
+#define DA9062AA_BUCK2_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK1_CFG = 0x09E */
+#define DA9062AA_BUCK1_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK1_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK1_MODE_SHIFT 6
+#define DA9062AA_BUCK1_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK4_CFG = 0x09F */
+#define DA9062AA_BUCK4_VTTR_EN_SHIFT 3
+#define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3)
+#define DA9062AA_BUCK4_VTT_EN_SHIFT 4
+#define DA9062AA_BUCK4_VTT_EN_MASK BIT(4)
+#define DA9062AA_BUCK4_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK4_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK4_MODE_SHIFT 6
+#define DA9062AA_BUCK4_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK3_CFG = 0x0A0 */
+#define DA9062AA_BUCK3_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK3_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK3_MODE_SHIFT 6
+#define DA9062AA_BUCK3_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_VBUCK2_A = 0x0A3 */
+#define DA9062AA_VBUCK2_A_SHIFT 0
+#define DA9062AA_VBUCK2_A_MASK 0x7f
+#define DA9062AA_BUCK2_SL_A_SHIFT 7
+#define DA9062AA_BUCK2_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK1_A = 0x0A4 */
+#define DA9062AA_VBUCK1_A_SHIFT 0
+#define DA9062AA_VBUCK1_A_MASK 0x7f
+#define DA9062AA_BUCK1_SL_A_SHIFT 7
+#define DA9062AA_BUCK1_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK4_A = 0x0A5 */
+#define DA9062AA_VBUCK4_A_SHIFT 0
+#define DA9062AA_VBUCK4_A_MASK 0x7f
+#define DA9062AA_BUCK4_SL_A_SHIFT 7
+#define DA9062AA_BUCK4_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK3_A = 0x0A7 */
+#define DA9062AA_VBUCK3_A_SHIFT 0
+#define DA9062AA_VBUCK3_A_MASK 0x7f
+#define DA9062AA_BUCK3_SL_A_SHIFT 7
+#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO1_A = 0x0A9 */
+#define DA9062AA_VLDO1_A_SHIFT 0
+#define DA9062AA_VLDO1_A_MASK 0x3f
+#define DA9062AA_LDO1_SL_A_SHIFT 7
+#define DA9062AA_LDO1_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO2_A = 0x0AA */
+#define DA9062AA_VLDO2_A_SHIFT 0
+#define DA9062AA_VLDO2_A_MASK 0x3f
+#define DA9062AA_LDO2_SL_A_SHIFT 7
+#define DA9062AA_LDO2_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO3_A = 0x0AB */
+#define DA9062AA_VLDO3_A_SHIFT 0
+#define DA9062AA_VLDO3_A_MASK 0x3f
+#define DA9062AA_LDO3_SL_A_SHIFT 7
+#define DA9062AA_LDO3_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO4_A = 0x0AC */
+#define DA9062AA_VLDO4_A_SHIFT 0
+#define DA9062AA_VLDO4_A_MASK 0x3f
+#define DA9062AA_LDO4_SL_A_SHIFT 7
+#define DA9062AA_LDO4_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK2_B = 0x0B4 */
+#define DA9062AA_VBUCK2_B_SHIFT 0
+#define DA9062AA_VBUCK2_B_MASK 0x7f
+#define DA9062AA_BUCK2_SL_B_SHIFT 7
+#define DA9062AA_BUCK2_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK1_B = 0x0B5 */
+#define DA9062AA_VBUCK1_B_SHIFT 0
+#define DA9062AA_VBUCK1_B_MASK 0x7f
+#define DA9062AA_BUCK1_SL_B_SHIFT 7
+#define DA9062AA_BUCK1_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK4_B = 0x0B6 */
+#define DA9062AA_VBUCK4_B_SHIFT 0
+#define DA9062AA_VBUCK4_B_MASK 0x7f
+#define DA9062AA_BUCK4_SL_B_SHIFT 7
+#define DA9062AA_BUCK4_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK3_B = 0x0B8 */
+#define DA9062AA_VBUCK3_B_SHIFT 0
+#define DA9062AA_VBUCK3_B_MASK 0x7f
+#define DA9062AA_BUCK3_SL_B_SHIFT 7
+#define DA9062AA_BUCK3_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO1_B = 0x0BA */
+#define DA9062AA_VLDO1_B_SHIFT 0
+#define DA9062AA_VLDO1_B_MASK 0x3f
+#define DA9062AA_LDO1_SL_B_SHIFT 7
+#define DA9062AA_LDO1_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO2_B = 0x0BB */
+#define DA9062AA_VLDO2_B_SHIFT 0
+#define DA9062AA_VLDO2_B_MASK 0x3f
+#define DA9062AA_LDO2_SL_B_SHIFT 7
+#define DA9062AA_LDO2_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO3_B = 0x0BC */
+#define DA9062AA_VLDO3_B_SHIFT 0
+#define DA9062AA_VLDO3_B_MASK 0x3f
+#define DA9062AA_LDO3_SL_B_SHIFT 7
+#define DA9062AA_LDO3_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO4_B = 0x0BD */
+#define DA9062AA_VLDO4_B_SHIFT 0
+#define DA9062AA_VLDO4_B_MASK 0x3f
+#define DA9062AA_LDO4_SL_B_SHIFT 7
+#define DA9062AA_LDO4_SL_B_MASK BIT(7)
+
+/* DA9062AA_BBAT_CONT = 0x0C5 */
+#define DA9062AA_BCHG_VSET_SHIFT 0
+#define DA9062AA_BCHG_VSET_MASK 0x0f
+#define DA9062AA_BCHG_ISET_SHIFT 4
+#define DA9062AA_BCHG_ISET_MASK (0x0f << 4)
+
+/* DA9062AA_INTERFACE = 0x105 */
+#define DA9062AA_IF_BASE_ADDR_SHIFT 4
+#define DA9062AA_IF_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9062AA_CONFIG_A = 0x106 */
+#define DA9062AA_PM_I_V_SHIFT 0
+#define DA9062AA_PM_I_V_MASK 0x01
+#define DA9062AA_PM_O_TYPE_SHIFT 2
+#define DA9062AA_PM_O_TYPE_MASK BIT(2)
+#define DA9062AA_IRQ_TYPE_SHIFT 3
+#define DA9062AA_IRQ_TYPE_MASK BIT(3)
+#define DA9062AA_PM_IF_V_SHIFT 4
+#define DA9062AA_PM_IF_V_MASK BIT(4)
+#define DA9062AA_PM_IF_FMP_SHIFT 5
+#define DA9062AA_PM_IF_FMP_MASK BIT(5)
+#define DA9062AA_PM_IF_HSM_SHIFT 6
+#define DA9062AA_PM_IF_HSM_MASK BIT(6)
+
+/* DA9062AA_CONFIG_B = 0x107 */
+#define DA9062AA_VDD_FAULT_ADJ_SHIFT 0
+#define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f
+#define DA9062AA_VDD_HYST_ADJ_SHIFT 4
+#define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4)
+
+/* DA9062AA_CONFIG_C = 0x108 */
+#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2
+#define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2)
+#define DA9062AA_BUCK1_CLK_INV_SHIFT 3
+#define DA9062AA_BUCK1_CLK_INV_MASK BIT(3)
+#define DA9062AA_BUCK4_CLK_INV_SHIFT 4
+#define DA9062AA_BUCK4_CLK_INV_MASK BIT(4)
+#define DA9062AA_BUCK3_CLK_INV_SHIFT 6
+#define DA9062AA_BUCK3_CLK_INV_MASK BIT(6)
+
+/* DA9062AA_CONFIG_D = 0x109 */
+#define DA9062AA_GPI_V_SHIFT 0
+#define DA9062AA_GPI_V_MASK 0x01
+#define DA9062AA_NIRQ_MODE_SHIFT 1
+#define DA9062AA_NIRQ_MODE_MASK BIT(1)
+#define DA9062AA_SYSTEM_EN_RD_SHIFT 2
+#define DA9062AA_SYSTEM_EN_RD_MASK BIT(2)
+#define DA9062AA_FORCE_RESET_SHIFT 5
+#define DA9062AA_FORCE_RESET_MASK BIT(5)
+
+/* DA9062AA_CONFIG_E = 0x10A */
+#define DA9062AA_BUCK1_AUTO_SHIFT 0
+#define DA9062AA_BUCK1_AUTO_MASK 0x01
+#define DA9062AA_BUCK2_AUTO_SHIFT 1
+#define DA9062AA_BUCK2_AUTO_MASK BIT(1)
+#define DA9062AA_BUCK4_AUTO_SHIFT 2
+#define DA9062AA_BUCK4_AUTO_MASK BIT(2)
+#define DA9062AA_BUCK3_AUTO_SHIFT 4
+#define DA9062AA_BUCK3_AUTO_MASK BIT(4)
+
+/* DA9062AA_CONFIG_G = 0x10C */
+#define DA9062AA_LDO1_AUTO_SHIFT 0
+#define DA9062AA_LDO1_AUTO_MASK 0x01
+#define DA9062AA_LDO2_AUTO_SHIFT 1
+#define DA9062AA_LDO2_AUTO_MASK BIT(1)
+#define DA9062AA_LDO3_AUTO_SHIFT 2
+#define DA9062AA_LDO3_AUTO_MASK BIT(2)
+#define DA9062AA_LDO4_AUTO_SHIFT 3
+#define DA9062AA_LDO4_AUTO_MASK BIT(3)
+
+/* DA9062AA_CONFIG_H = 0x10D */
+#define DA9062AA_BUCK1_2_MERGE_SHIFT 3
+#define DA9062AA_BUCK1_2_MERGE_MASK BIT(3)
+#define DA9062AA_BUCK2_OD_SHIFT 5
+#define DA9062AA_BUCK2_OD_MASK BIT(5)
+#define DA9062AA_BUCK1_OD_SHIFT 6
+#define DA9062AA_BUCK1_OD_MASK BIT(6)
+
+/* DA9062AA_CONFIG_I = 0x10E */
+#define DA9062AA_NONKEY_PIN_SHIFT 0
+#define DA9062AA_NONKEY_PIN_MASK 0x03
+#define DA9062AA_nONKEY_SD_SHIFT 2
+#define DA9062AA_nONKEY_SD_MASK BIT(2)
+#define DA9062AA_WATCHDOG_SD_SHIFT 3
+#define DA9062AA_WATCHDOG_SD_MASK BIT(3)
+#define DA9062AA_KEY_SD_MODE_SHIFT 4
+#define DA9062AA_KEY_SD_MODE_MASK BIT(4)
+#define DA9062AA_HOST_SD_MODE_SHIFT 5
+#define DA9062AA_HOST_SD_MODE_MASK BIT(5)
+#define DA9062AA_INT_SD_MODE_SHIFT 6
+#define DA9062AA_INT_SD_MODE_MASK BIT(6)
+#define DA9062AA_LDO_SD_SHIFT 7
+#define DA9062AA_LDO_SD_MASK BIT(7)
+
+/* DA9062AA_CONFIG_J = 0x10F */
+#define DA9062AA_KEY_DELAY_SHIFT 0
+#define DA9062AA_KEY_DELAY_MASK 0x03
+#define DA9062AA_SHUT_DELAY_SHIFT 2
+#define DA9062AA_SHUT_DELAY_MASK (0x03 << 2)
+#define DA9062AA_RESET_DURATION_SHIFT 4
+#define DA9062AA_RESET_DURATION_MASK (0x03 << 4)
+#define DA9062AA_TWOWIRE_TO_SHIFT 6
+#define DA9062AA_TWOWIRE_TO_MASK BIT(6)
+#define DA9062AA_IF_RESET_SHIFT 7
+#define DA9062AA_IF_RESET_MASK BIT(7)
+
+/* DA9062AA_CONFIG_K = 0x110 */
+#define DA9062AA_GPIO0_PUPD_SHIFT 0
+#define DA9062AA_GPIO0_PUPD_MASK 0x01
+#define DA9062AA_GPIO1_PUPD_SHIFT 1
+#define DA9062AA_GPIO1_PUPD_MASK BIT(1)
+#define DA9062AA_GPIO2_PUPD_SHIFT 2
+#define DA9062AA_GPIO2_PUPD_MASK BIT(2)
+#define DA9062AA_GPIO3_PUPD_SHIFT 3
+#define DA9062AA_GPIO3_PUPD_MASK BIT(3)
+#define DA9062AA_GPIO4_PUPD_SHIFT 4
+#define DA9062AA_GPIO4_PUPD_MASK BIT(4)
+
+/* DA9062AA_CONFIG_M = 0x112 */
+#define DA9062AA_NSHUTDOWN_PU_SHIFT 1
+#define DA9062AA_NSHUTDOWN_PU_MASK BIT(1)
+#define DA9062AA_WDG_MODE_SHIFT 3
+#define DA9062AA_WDG_MODE_MASK BIT(3)
+#define DA9062AA_OSC_FRQ_SHIFT 4
+#define DA9062AA_OSC_FRQ_MASK (0x0f << 4)
+
+/* DA9062AA_TRIM_CLDR = 0x120 */
+#define DA9062AA_TRIM_CLDR_SHIFT 0
+#define DA9062AA_TRIM_CLDR_MASK 0xff
+
+/* DA9062AA_GP_ID_0 = 0x121 */
+#define DA9062AA_GP_0_SHIFT 0
+#define DA9062AA_GP_0_MASK 0xff
+
+/* DA9062AA_GP_ID_1 = 0x122 */
+#define DA9062AA_GP_1_SHIFT 0
+#define DA9062AA_GP_1_MASK 0xff
+
+/* DA9062AA_GP_ID_2 = 0x123 */
+#define DA9062AA_GP_2_SHIFT 0
+#define DA9062AA_GP_2_MASK 0xff
+
+/* DA9062AA_GP_ID_3 = 0x124 */
+#define DA9062AA_GP_3_SHIFT 0
+#define DA9062AA_GP_3_MASK 0xff
+
+/* DA9062AA_GP_ID_4 = 0x125 */
+#define DA9062AA_GP_4_SHIFT 0
+#define DA9062AA_GP_4_MASK 0xff
+
+/* DA9062AA_GP_ID_5 = 0x126 */
+#define DA9062AA_GP_5_SHIFT 0
+#define DA9062AA_GP_5_MASK 0xff
+
+/* DA9062AA_GP_ID_6 = 0x127 */
+#define DA9062AA_GP_6_SHIFT 0
+#define DA9062AA_GP_6_MASK 0xff
+
+/* DA9062AA_GP_ID_7 = 0x128 */
+#define DA9062AA_GP_7_SHIFT 0
+#define DA9062AA_GP_7_MASK 0xff
+
+/* DA9062AA_GP_ID_8 = 0x129 */
+#define DA9062AA_GP_8_SHIFT 0
+#define DA9062AA_GP_8_MASK 0xff
+
+/* DA9062AA_GP_ID_9 = 0x12A */
+#define DA9062AA_GP_9_SHIFT 0
+#define DA9062AA_GP_9_MASK 0xff
+
+/* DA9062AA_GP_ID_10 = 0x12B */
+#define DA9062AA_GP_10_SHIFT 0
+#define DA9062AA_GP_10_MASK 0xff
+
+/* DA9062AA_GP_ID_11 = 0x12C */
+#define DA9062AA_GP_11_SHIFT 0
+#define DA9062AA_GP_11_MASK 0xff
+
+/* DA9062AA_GP_ID_12 = 0x12D */
+#define DA9062AA_GP_12_SHIFT 0
+#define DA9062AA_GP_12_MASK 0xff
+
+/* DA9062AA_GP_ID_13 = 0x12E */
+#define DA9062AA_GP_13_SHIFT 0
+#define DA9062AA_GP_13_MASK 0xff
+
+/* DA9062AA_GP_ID_14 = 0x12F */
+#define DA9062AA_GP_14_SHIFT 0
+#define DA9062AA_GP_14_MASK 0xff
+
+/* DA9062AA_GP_ID_15 = 0x130 */
+#define DA9062AA_GP_15_SHIFT 0
+#define DA9062AA_GP_15_MASK 0xff
+
+/* DA9062AA_GP_ID_16 = 0x131 */
+#define DA9062AA_GP_16_SHIFT 0
+#define DA9062AA_GP_16_MASK 0xff
+
+/* DA9062AA_GP_ID_17 = 0x132 */
+#define DA9062AA_GP_17_SHIFT 0
+#define DA9062AA_GP_17_MASK 0xff
+
+/* DA9062AA_GP_ID_18 = 0x133 */
+#define DA9062AA_GP_18_SHIFT 0
+#define DA9062AA_GP_18_MASK 0xff
+
+/* DA9062AA_GP_ID_19 = 0x134 */
+#define DA9062AA_GP_19_SHIFT 0
+#define DA9062AA_GP_19_MASK 0xff
+
+/* DA9062AA_DEVICE_ID = 0x181 */
+#define DA9062AA_DEV_ID_SHIFT 0
+#define DA9062AA_DEV_ID_MASK 0xff
+
+/* DA9062AA_VARIANT_ID = 0x182 */
+#define DA9062AA_VRC_SHIFT 0
+#define DA9062AA_VRC_MASK 0x0f
+#define DA9062AA_MRC_SHIFT 4
+#define DA9062AA_MRC_MASK (0x0f << 4)
+
+/* DA9062AA_CUSTOMER_ID = 0x183 */
+#define DA9062AA_CUST_ID_SHIFT 0
+#define DA9062AA_CUST_ID_MASK 0xff
+
+/* DA9062AA_CONFIG_ID = 0x184 */
+#define DA9062AA_CONFIG_REV_SHIFT 0
+#define DA9062AA_CONFIG_REV_MASK 0xff
+
+#endif /* __DA9062_H__ */
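
Usage sketch: every DA9062AA field above pairs a _SHIFT with a _MASK, so a
driver can update any field with one regmap_update_bits() call. A minimal
example (the regmap handle is assumed to come from the MFD core):

	/* Enable the RTC: set RTC_EN in CONTROL_E (0x012, per the comment above). */
	static int da9062_rtc_enable(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, 0x012 /* DA9062AA_CONTROL_E */,
					  DA9062AA_RTC_EN_MASK,
					  1 << DA9062AA_RTC_EN_SHIFT);
	}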
diff --git a/kernel/include/linux/mfd/da9063/core.h b/kernel/include/linux/mfd/da9063/core.h
index 79f4d822b..621af8212 100644
--- a/kernel/include/linux/mfd/da9063/core.h
+++ b/kernel/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@ enum da9063_irqs {
DA9063_IRQ_COMP_1V2,
DA9063_IRQ_LDO_LIM,
DA9063_IRQ_REG_UVOV,
+ DA9063_IRQ_DVC_RDY,
DA9063_IRQ_VDD_MON,
DA9063_IRQ_WARN,
DA9063_IRQ_GPI0,
diff --git a/kernel/include/linux/mfd/da9063/pdata.h b/kernel/include/linux/mfd/da9063/pdata.h
index 95c874221..612383bd8 100644
--- a/kernel/include/linux/mfd/da9063/pdata.h
+++ b/kernel/include/linux/mfd/da9063/pdata.h
@@ -103,6 +103,7 @@ struct da9063;
struct da9063_pdata {
int (*init)(struct da9063 *da9063);
int irq_base;
+ bool key_power;
unsigned flags;
struct da9063_regulators_pdata *regulators_pdata;
struct led_platform_data *leds_pdata;
diff --git a/kernel/include/linux/mfd/da9150/core.h b/kernel/include/linux/mfd/da9150/core.h
index 76e668933..1bf50caeb 100644
--- a/kernel/include/linux/mfd/da9150/core.h
+++ b/kernel/include/linux/mfd/da9150/core.h
@@ -15,6 +15,7 @@
#define __DA9150_CORE_H
#include <linux/device.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
@@ -46,23 +47,39 @@
#define DA9150_IRQ_GPADC 19
#define DA9150_IRQ_WKUP 20
+/* I2C sub-device address */
+#define DA9150_QIF_I2C_ADDR_LSB 0x5
+
+struct da9150_fg_pdata {
+ u32 update_interval; /* msecs */
+ u8 warn_soc_lvl; /* % value */
+ u8 crit_soc_lvl; /* % value */
+};
+
struct da9150_pdata {
int irq_base;
+ struct da9150_fg_pdata *fg_pdata;
};
struct da9150 {
struct device *dev;
struct regmap *regmap;
+ struct i2c_client *core_qif;
+
struct regmap_irq_chip_data *regmap_irq_data;
int irq;
int irq_base;
};
-/* Device I/O */
+/* Device I/O - Query Interface for FG and standard register access */
+void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf);
+void da9150_write_qif(struct da9150 *da9150, u8 addr, int count, const u8 *buf);
+
u8 da9150_reg_read(struct da9150 *da9150, u16 reg);
void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val);
void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val);
void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf);
void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf);
+
#endif /* __DA9150_CORE_H */
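
Usage sketch for the new QIF accessors declared above: a fuel-gauge read is a
bulk transfer at a QIF address. FG_SOC_QIF_ADDR below is a hypothetical
example address, not a register from this header, and little-endian byte
order is assumed:

	#define FG_SOC_QIF_ADDR	0x10	/* hypothetical example address */

	static u16 da9150_fg_read_soc(struct da9150 *da9150)
	{
		u8 buf[2];

		da9150_read_qif(da9150, FG_SOC_QIF_ADDR, sizeof(buf), buf);
		return (buf[1] << 8) | buf[0];	/* assumes little-endian order */
	}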
diff --git a/kernel/include/linux/mfd/intel_bxtwc.h b/kernel/include/linux/mfd/intel_bxtwc.h
new file mode 100644
index 000000000..1a0ee9d6e
--- /dev/null
+++ b/kernel/include/linux/mfd/intel_bxtwc.h
@@ -0,0 +1,69 @@
+/*
+ * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/mfd/intel_soc_pmic.h>
+
+#ifndef __INTEL_BXTWC_H__
+#define __INTEL_BXTWC_H__
+
+/* BXT WC devices */
+#define BXTWC_DEVICE1_ADDR 0x4E
+#define BXTWC_DEVICE2_ADDR 0x4F
+#define BXTWC_DEVICE3_ADDR 0x5E
+
+/* Device 1 registers */
+#define BXTWC_CHIPID 0x4E00
+#define BXTWC_CHIPVER 0x4E01
+
+#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A
+#define BXTWC_CHGRCTRL0_ADDR 0x5E16
+#define BXTWC_CHGRCTRL1_ADDR 0x5E17
+#define BXTWC_CHGRCTRL2_ADDR 0x5E18
+#define BXTWC_CHGRSTATUS_ADDR 0x5E19
+#define BXTWC_THRMBATZONE_ADDR 0x4F22
+
+#define BXTWC_USBPATH_ADDR 0x5E19
+#define BXTWC_USBPHYCTRL_ADDR 0x5E07
+#define BXTWC_USBIDCTRL_ADDR 0x5E05
+#define BXTWC_USBIDEN_MASK 0x01
+#define BXTWC_USBIDSTAT_ADDR 0x00FF
+#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29
+
+#define BXTWC_DBGUSBBC1_ADDR 0x5FE0
+#define BXTWC_DBGUSBBC2_ADDR 0x5FE1
+#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2
+
+#define BXTWC_WAKESRC_ADDR 0x4E22
+#define BXTWC_WAKESRC2_ADDR 0x4EE5
+#define BXTWC_CHRTTADDR_ADDR 0x5E22
+#define BXTWC_CHRTTDATA_ADDR 0x5E23
+
+#define BXTWC_STHRMIRQ0_ADDR 0x4F19
+#define WC_MTHRMIRQ1_ADDR 0x4E12
+#define WC_STHRMIRQ1_ADDR 0x4F1A
+#define WC_STHRMIRQ2_ADDR 0x4F1B
+
+#define BXTWC_THRMZN0H_ADDR 0x4F44
+#define BXTWC_THRMZN0L_ADDR 0x4F45
+#define BXTWC_THRMZN1H_ADDR 0x4F46
+#define BXTWC_THRMZN1L_ADDR 0x4F47
+#define BXTWC_THRMZN2H_ADDR 0x4F48
+#define BXTWC_THRMZN2L_ADDR 0x4F49
+#define BXTWC_THRMZN3H_ADDR 0x4F4A
+#define BXTWC_THRMZN3L_ADDR 0x4F4B
+#define BXTWC_THRMZN4H_ADDR 0x4F4C
+#define BXTWC_THRMZN4L_ADDR 0x4F4D
+
+#endif
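
The register macros above encode the I2C device address in the high byte
(e.g. BXTWC_CHIPID = 0x4E00 sits on device 0x4E), so one regmap can serve all
three devices. A sketch of reading the chip version, assuming the shared
intel_soc_pmic regmap decodes these 16-bit addresses:

	static int bxtwc_print_version(struct intel_soc_pmic *pmic)
	{
		unsigned int id, ver;
		int ret;

		ret = regmap_read(pmic->regmap, BXTWC_CHIPID, &id);
		if (!ret)
			ret = regmap_read(pmic->regmap, BXTWC_CHIPVER, &ver);
		if (ret)
			return ret;

		dev_info(pmic->dev, "BXT WC PMIC id 0x%02x ver 0x%02x\n", id, ver);
		return 0;
	}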
diff --git a/kernel/include/linux/mfd/intel_soc_pmic.h b/kernel/include/linux/mfd/intel_soc_pmic.h
index abcbfcf32..cf619dbea 100644
--- a/kernel/include/linux/mfd/intel_soc_pmic.h
+++ b/kernel/include/linux/mfd/intel_soc_pmic.h
@@ -25,6 +25,8 @@ struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_level2;
+ struct device *dev;
};
#endif /* __INTEL_SOC_PMIC_H__ */
diff --git a/kernel/include/linux/mfd/lpc_ich.h b/kernel/include/linux/mfd/lpc_ich.h
index 8feac782f..2b300b44f 100644
--- a/kernel/include/linux/mfd/lpc_ich.h
+++ b/kernel/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
-/* Watchdog resources */
-#define ICH_RES_IO_TCO 0
-#define ICH_RES_IO_SMI 1
-#define ICH_RES_MEM_OFF 2
-#define ICH_RES_MEM_GCS_PMC 0
-
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
diff --git a/kernel/include/linux/mfd/max77686.h b/kernel/include/linux/mfd/max77686.h
index bb995ab9a..d4b72d519 100644
--- a/kernel/include/linux/mfd/max77686.h
+++ b/kernel/include/linux/mfd/max77686.h
@@ -125,9 +125,4 @@ enum max77686_opmode {
MAX77686_OPMODE_STANDBY,
};
-struct max77686_opmode_data {
- int id;
- int mode;
-};
-
#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/kernel/include/linux/mfd/max77693-common.h b/kernel/include/linux/mfd/max77693-common.h
new file mode 100644
index 000000000..095b121aa
--- /dev/null
+++ b/kernel/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
+/*
+ * Common data shared between Maxim 77693 and 77843 drivers
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_COMMON_H
+#define __LINUX_MFD_MAX77693_COMMON_H
+
+enum max77693_types {
+ TYPE_MAX77693_UNKNOWN,
+ TYPE_MAX77693,
+ TYPE_MAX77843,
+
+ TYPE_MAX77693_NUM,
+};
+
+/*
+ * Also shared with the max77843 driver.
+ */
+struct max77693_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* 0xCC, PMIC, Charger, Flash LED */
+ struct i2c_client *i2c_muic; /* 0x4A, MUIC */
+ struct i2c_client *i2c_haptic; /* MAX77693: 0x90, Haptic */
+ struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */
+
+ enum max77693_types type;
+
+ struct regmap *regmap;
+ struct regmap *regmap_muic;
+ struct regmap *regmap_haptic; /* Only MAX77693 */
+ struct regmap *regmap_chg; /* Only MAX77843 */
+
+ struct regmap_irq_chip_data *irq_data_led;
+ struct regmap_irq_chip_data *irq_data_topsys;
+ struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+
+
+#endif /* __LINUX_MFD_MAX77693_COMMON_H */
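
With the shared struct above, chip-specific resources are selected by type at
run time rather than by compile-time duplication. A minimal sketch:

	/* Pick the charger regmap: the MAX77843 has a dedicated one, while on
	 * the MAX77693 the charger shares the main PMIC block (0xCC). */
	static struct regmap *max77693_get_chg_regmap(struct max77693_dev *chip)
	{
		switch (chip->type) {
		case TYPE_MAX77843:
			return chip->regmap_chg;
		case TYPE_MAX77693:
		default:
			return chip->regmap;
		}
	}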
diff --git a/kernel/include/linux/mfd/max77693-private.h b/kernel/include/linux/mfd/max77693-private.h
index 51633ea6f..3c7a63b98 100644
--- a/kernel/include/linux/mfd/max77693-private.h
+++ b/kernel/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@ enum max77693_muic_reg {
#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT (0)
-#define STATUS1_ADCLOW_SHIFT (5)
-#define STATUS1_ADCERR_SHIFT (6)
-#define STATUS1_ADC1K_SHIFT (7)
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT (0)
-#define STATUS2_CHGDETRUN_SHIFT (3)
-#define STATUS2_DCDTMR_SHIFT (4)
-#define STATUS2_DXOVP_SHIFT (5)
-#define STATUS2_VBVOLT_SHIFT (6)
-#define STATUS2_VIDRM_SHIFT (7)
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT (2)
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+#define MAX77693_STATUS1_ADC_SHIFT 0
+#define MAX77693_STATUS1_ADCLOW_SHIFT 5
+#define MAX77693_STATUS1_ADCERR_SHIFT 6
+#define MAX77693_STATUS1_ADC1K_SHIFT 7
+#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT)
+#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
+#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT)
+#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT)
+
+#define MAX77693_STATUS2_CHGTYP_SHIFT 0
+#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3
+#define MAX77693_STATUS2_DCDTMR_SHIFT 4
+#define MAX77693_STATUS2_DXOVP_SHIFT 5
+#define MAX77693_STATUS2_VBVOLT_SHIFT 6
+#define MAX77693_STATUS2_VIDRM_SHIFT 7
+#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
+#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
+#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT)
+#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
+#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT)
+
+#define MAX77693_STATUS3_OVP_SHIFT 2
+#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT)
/* MAX77693 CDETCTRL1~2 register */
#define CDETCTRL1_CHGDETEN_SHIFT (0)
@@ -362,38 +362,38 @@ enum max77693_muic_reg {
#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
| (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
| (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
| (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
| (0 << COMN1SW_SHIFT))
-#define CONTROL2_LOWPWR_SHIFT (0)
-#define CONTROL2_ADCEN_SHIFT (1)
-#define CONTROL2_CPEN_SHIFT (2)
-#define CONTROL2_SFOUTASRT_SHIFT (3)
-#define CONTROL2_SFOUTORD_SHIFT (4)
-#define CONTROL2_ACCDET_SHIFT (5)
-#define CONTROL2_USBCPINT_SHIFT (6)
-#define CONTROL2_RCPS_SHIFT (7)
-#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT (0)
-#define CONTROL3_BTLDSET_SHIFT (2)
-#define CONTROL3_ADCDBSET_SHIFT (4)
-#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define MAX77693_CONTROL2_LOWPWR_SHIFT 0
+#define MAX77693_CONTROL2_ADCEN_SHIFT 1
+#define MAX77693_CONTROL2_CPEN_SHIFT 2
+#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3
+#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4
+#define MAX77693_CONTROL2_ACCDET_SHIFT 5
+#define MAX77693_CONTROL2_USBCPINT_SHIFT 6
+#define MAX77693_CONTROL2_RCPS_SHIFT 7
+#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
+#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
+#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT)
+#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
+#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
+#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
+#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
+#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT)
+
+#define MAX77693_CONTROL3_JIGSET_SHIFT 0
+#define MAX77693_CONTROL3_BTLDSET_SHIFT 2
+#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4
+#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
+#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
+#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
/* Slave addr = 0x90: Haptic */
enum max77693_haptic_reg {
@@ -529,36 +529,4 @@ enum max77693_irq_muic {
MAX77693_MUIC_IRQ_NR,
};
-struct max77693_dev {
- struct device *dev;
- struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
- struct i2c_client *muic; /* 0x4A , MUIC */
- struct i2c_client *haptic; /* 0x90 , Haptic */
-
- int type;
-
- struct regmap *regmap;
- struct regmap *regmap_muic;
- struct regmap *regmap_haptic;
-
- struct regmap_irq_chip_data *irq_data_led;
- struct regmap_irq_chip_data *irq_data_topsys;
- struct regmap_irq_chip_data *irq_data_charger;
- struct regmap_irq_chip_data *irq_data_muic;
-
- int irq;
- int irq_gpio;
- struct mutex irqlock;
- int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
- int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
-};
-
-enum max77693_types {
- TYPE_MAX77693,
-};
-
-extern int max77693_irq_init(struct max77693_dev *max77686);
-extern void max77693_irq_exit(struct max77693_dev *max77686);
-extern int max77693_irq_resume(struct max77693_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/kernel/include/linux/mfd/max77843-private.h b/kernel/include/linux/mfd/max77843-private.h
index 7178ace83..c19303b0c 100644
--- a/kernel/include/linux/mfd/max77843-private.h
+++ b/kernel/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@ enum max77843_irq_muic {
MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
/* MAX77843 STATUS register */
-#define STATUS1_ADC_SHIFT 0
-#define STATUS1_ADCERROR_SHIFT 6
-#define STATUS1_ADC1K_SHIFT 7
-#define STATUS2_CHGTYP_SHIFT 0
-#define STATUS2_CHGDETRUN_SHIFT 3
-#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DXOVP_SHIFT 5
-#define STATUS2_VBVOLT_SHIFT 6
-#define STATUS3_VBADC_SHIFT 0
-#define STATUS3_VDNMON_SHIFT 4
-#define STATUS3_DNRES_SHIFT 5
-#define STATUS3_MPNACK_SHIFT 6
-
-#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
-#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
-#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
-#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
-#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
-#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
-#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
-#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0
+#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6
+#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7
+#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3
+#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4
+#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5
+#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6
+#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0
+#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4
+#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5
+#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
/* MAX77843 CONTROL register */
-#define CONTROL1_COMP1SW_SHIFT 0
-#define CONTROL1_COMP2SW_SHIFT 3
-#define CONTROL1_IDBEN_SHIFT 7
-#define CONTROL2_LOWPWR_SHIFT 0
-#define CONTROL2_ADCEN_SHIFT 1
-#define CONTROL2_CPEN_SHIFT 2
-#define CONTROL2_ACC_DET_SHIFT 5
-#define CONTROL2_USBCPINT_SHIFT 6
-#define CONTROL2_RCPS_SHIFT 7
-#define CONTROL3_JIGSET_SHIFT 0
-#define CONTROL4_ADCDBSET_SHIFT 0
-#define CONTROL4_USBAUTO_SHIFT 4
-#define CONTROL4_FCTAUTO_SHIFT 5
-#define CONTROL4_ADCMODE_SHIFT 6
-
-#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
-#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
-#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
-#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
+#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
+#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
+#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
+#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
+#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2
+#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5
+#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6
+#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7
+#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0
+#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5
+#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
/* MAX77843 switch port */
#define COM_OPEN 0
@@ -383,38 +383,38 @@ enum max77843_irq_muic {
#define COM_AUX_USB 4
#define COM_AUX_UART 5
-#define CONTROL1_COM_SW \
+#define MAX77843_MUIC_CONTROL1_COM_SW \
((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
-#define CONTROL1_SW_OPEN \
- ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
- COM_OPEN << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_USB \
- ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
- COM_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUDIO \
- ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
- COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_UART \
- ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
- COM_UART << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_USB \
- ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
- COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_UART \
- ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
- COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_OPEN \
+ ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_USB \
+ ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_UART \
+ ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
#define MAX77843_DISABLE 0
#define MAX77843_ENABLE 1
#define CONTROL4_AUTO_DISABLE \
- ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
- (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+ ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
#define CONTROL4_AUTO_ENABLE \
- ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
- (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+ ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
/* MAX77843 SAFEOUT LDO Control register */
#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
@@ -431,24 +431,4 @@ enum max77843_irq_muic {
#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
(0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
-struct max77843 {
- struct device *dev;
-
- struct i2c_client *i2c;
- struct i2c_client *i2c_chg;
- struct i2c_client *i2c_fuel;
- struct i2c_client *i2c_muic;
-
- struct regmap *regmap;
- struct regmap *regmap_chg;
- struct regmap *regmap_fuel;
- struct regmap *regmap_muic;
-
- struct regmap_irq_chip_data *irq_data;
- struct regmap_irq_chip_data *irq_data_chg;
- struct regmap_irq_chip_data *irq_data_fuel;
- struct regmap_irq_chip_data *irq_data_muic;
-
- int irq;
-};
#endif /* __MAX77843_H__ */
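
Usage sketch for the renamed switch macros: selecting the USB path is a single
regmap_update_bits() call. MAX77843_MUIC_REG_CONTROL1 is assumed to be the
CONTROL1 register index defined in an unshown part of this header:

	static int max77843_muic_set_usb_path(struct regmap *regmap_muic)
	{
		return regmap_update_bits(regmap_muic, MAX77843_MUIC_REG_CONTROL1,
					  MAX77843_MUIC_CONTROL1_COM_SW,
					  MAX77843_MUIC_CONTROL1_SW_USB);
	}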
diff --git a/kernel/include/linux/mfd/mt6397/core.h b/kernel/include/linux/mfd/mt6397/core.h
index cf5265b0d..45b8e8aa1 100644
--- a/kernel/include/linux/mfd/mt6397/core.h
+++ b/kernel/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@ struct mt6397_chip {
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
+ u16 wake_mask[2];
u16 irq_masks_cur[2];
u16 irq_masks_cache[2];
};
diff --git a/kernel/include/linux/mfd/palmas.h b/kernel/include/linux/mfd/palmas.h
index bb270bd03..13e1d9693 100644
--- a/kernel/include/linux/mfd/palmas.h
+++ b/kernel/include/linux/mfd/palmas.h
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/extcon.h>
+#include <linux/of_gpio.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -551,10 +552,16 @@ struct palmas_usb {
int vbus_otg_irq;
int vbus_irq;
+ int gpio_id_irq;
+ struct gpio_desc *id_gpiod;
+ unsigned long sw_debounce_jiffies;
+ struct delayed_work wq_detectid;
+
enum palmas_usb_state linkstat;
int wakeup;
bool enable_vbus_detection;
bool enable_id_detection;
+ bool enable_gpio_id_detection;
};
#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
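
The new fields support software-debounced ID detection on a GPIO: the IRQ
handler only (re)arms the delayed work, and the work function samples the pin
once it has settled. A sketch, assuming the handler was requested for both
edges of the ID line:

	static irqreturn_t palmas_gpio_id_irq(int irq, void *data)
	{
		struct palmas_usb *palmas_usb = data;

		queue_delayed_work(system_power_efficient_wq,
				   &palmas_usb->wq_detectid,
				   palmas_usb->sw_debounce_jiffies);
		return IRQ_HANDLED;
	}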
diff --git a/kernel/include/linux/mfd/rtsx_pci.h b/kernel/include/linux/mfd/rtsx_pci.h
index ff843e7ca..7eb7cbac0 100644
--- a/kernel/include/linux/mfd/rtsx_pci.h
+++ b/kernel/include/linux/mfd/rtsx_pci.h
@@ -589,6 +589,7 @@
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
#define FUNC_FORCE_CTL 0xFE59
+#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
#define CHANGE_LINK_STATE 0xFE5B
#define RESET_LOAD_REG 0xFE5E
@@ -712,6 +713,7 @@
#define PHY_RCR1 0x02
#define PHY_RCR1_ADP_TIME_4 0x0400
#define PHY_RCR1_VCO_COARSE 0x001F
+#define PHY_RCR1_INIT_27S 0x0A1F
#define PHY_SSCCR2 0x02
#define PHY_SSCCR2_PLL_NCODE 0x0A00
#define PHY_SSCCR2_TIME0 0x001C
@@ -724,6 +726,7 @@
#define PHY_RCR2_FREQSEL_12 0x0040
#define PHY_RCR2_CDR_SC_12P 0x0010
#define PHY_RCR2_CALIB_LATE 0x0002
+#define PHY_RCR2_INIT_27S 0xC152
#define PHY_SSCCR3 0x03
#define PHY_SSCCR3_STEP_IN 0x2740
#define PHY_SSCCR3_CHECK_DELAY 0x0008
@@ -800,12 +803,14 @@
#define PHY_ANA1A_RXT_BIST 0x0500
#define PHY_ANA1A_TXR_BIST 0x0040
#define PHY_ANA1A_REV 0x0006
+#define PHY_FLD0_INIT_27S 0x2546
#define PHY_FLD1 0x1B
#define PHY_FLD2 0x1C
#define PHY_FLD3 0x1D
#define PHY_FLD3_TIMER_4 0x0800
#define PHY_FLD3_TIMER_6 0x0020
#define PHY_FLD3_RXDELINK 0x0004
+#define PHY_FLD3_INIT_27S 0x0004
#define PHY_ANA1D 0x1D
#define PHY_ANA1D_DEBUG_ADDR 0x0004
#define _PHY_FLD0 0x1D
@@ -824,6 +829,7 @@
#define PHY_FLD4_BER_COUNT 0x00E0
#define PHY_FLD4_BER_TIMER 0x000A
#define PHY_FLD4_BER_CHK_EN 0x0001
+#define PHY_FLD4_INIT_27S 0x5C7F
#define PHY_DIG1E 0x1E
#define PHY_DIG1E_REV 0x4000
#define PHY_DIG1E_D0_X_D1 0x1000
diff --git a/kernel/include/linux/mfd/samsung/core.h b/kernel/include/linux/mfd/samsung/core.h
index 75115384f..a06098639 100644
--- a/kernel/include/linux/mfd/samsung/core.h
+++ b/kernel/include/linux/mfd/samsung/core.h
@@ -132,6 +132,10 @@ struct sec_platform_data {
int buck2_init;
int buck3_init;
int buck4_init;
+ /* Whether to manually drive PWRHOLD low during shutdown. */
+ bool manual_poweroff;
+ /* Disable WRSTBI (buck voltage warm reset) during probe. */
+ bool disable_wrstbi;
};
/**
diff --git a/kernel/include/linux/mfd/samsung/s2mps11.h b/kernel/include/linux/mfd/samsung/s2mps11.h
index 7981a9d77..b288965e8 100644
--- a/kernel/include/linux/mfd/samsung/s2mps11.h
+++ b/kernel/include/linux/mfd/samsung/s2mps11.h
@@ -179,6 +179,7 @@ enum s2mps11_regulators {
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
+#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
#define S2MPS11_BUCK2_RAMP_SHIFT 6
#define S2MPS11_BUCK34_RAMP_SHIFT 4
diff --git a/kernel/include/linux/mfd/samsung/s2mps13.h b/kernel/include/linux/mfd/samsung/s2mps13.h
index b1fd675fa..239e977ba 100644
--- a/kernel/include/linux/mfd/samsung/s2mps13.h
+++ b/kernel/include/linux/mfd/samsung/s2mps13.h
@@ -184,5 +184,6 @@ enum s2mps13_regulators {
* Let's assume that default value will be set.
*/
#define S2MPS13_BUCK_RAMP_DELAY 12500
+#define S2MPS13_REG_WRSTBI_MASK BIT(5)
#endif /* __LINUX_MFD_S2MPS13_H */
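
Together with the new manual_poweroff flag in sec_platform_data, the PWRHOLD
mask lets the driver force a power-off from its shutdown path. A sketch, where
ctrl1_reg stands in for the chip's CTRL1 register index (assumed known per
variant):

	static void s2mps_manual_poweroff(struct regmap *regmap,
					  unsigned int ctrl1_reg)
	{
		/* Dropping PWRHOLD tells the PMIC to cut power. */
		regmap_update_bits(regmap, ctrl1_reg,
				   S2MPS11_CTRL1_PWRHOLD_MASK, 0);
	}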
diff --git a/kernel/include/linux/mfd/stmpe.h b/kernel/include/linux/mfd/stmpe.h
index c9d869027..cb8388391 100644
--- a/kernel/include/linux/mfd/stmpe.h
+++ b/kernel/include/linux/mfd/stmpe.h
@@ -118,47 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
- * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
- * data
- * @sample_time: ADC converstion time in number of clock.
- * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
- * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
- * recommended is 4.
- * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
- * @ref_sel: ADC reference source
- * (0 -> internal reference, 1 -> external reference)
- * @adc_freq: ADC Clock speed
- * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz)
- * @ave_ctrl: Sample average control
- * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
- * @touch_det_delay: Touch detect interrupt delay
- * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
- * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
- * recommended is 3
- * @settling: Panel driver settling time
- * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
- * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms)
- * recommended is 2
- * @fraction_z: Length of the fractional part in z
- * (fraction_z ([0..7]) = Count of the fractional part)
- * recommended is 7
- * @i_drive: current limit value of the touchscreen drivers
- * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
- *
- * */
-struct stmpe_ts_platform_data {
- u8 sample_time;
- u8 mod_12b;
- u8 ref_sel;
- u8 adc_freq;
- u8 ave_ctrl;
- u8 touch_det_delay;
- u8 settling;
- u8 fraction_z;
- u8 i_drive;
-};
-
-/**
* struct stmpe_platform_data - STMPE platform data
* @id: device id to distinguish between multiple STMPEs on the same board
* @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
@@ -168,7 +127,6 @@ struct stmpe_ts_platform_data {
* @irq_over_gpio: true if gpio is used to get irq
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
- * @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
int id;
@@ -178,8 +136,6 @@ struct stmpe_platform_data {
bool irq_over_gpio;
int irq_gpio;
int autosleep_timeout;
-
- struct stmpe_ts_platform_data *ts;
};
#endif
diff --git a/kernel/include/linux/mfd/syscon/atmel-mc.h b/kernel/include/linux/mfd/syscon/atmel-mc.h
new file mode 100644
index 000000000..afd9b8f1e
--- /dev/null
+++ b/kernel/include/linux/mfd/syscon/atmel-mc.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals
+ * registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_
+#define _LINUX_MFD_SYSCON_ATMEL_MC_H_
+
+/* Memory Controller */
+#define AT91_MC_RCR 0x00
+#define AT91_MC_RCB BIT(0)
+
+#define AT91_MC_ASR 0x04
+#define AT91_MC_UNADD BIT(0)
+#define AT91_MC_MISADD BIT(1)
+#define AT91_MC_ABTSZ GENMASK(9, 8)
+#define AT91_MC_ABTSZ_BYTE (0 << 8)
+#define AT91_MC_ABTSZ_HALFWORD (1 << 8)
+#define AT91_MC_ABTSZ_WORD (2 << 8)
+#define AT91_MC_ABTTYP GENMASK(11, 10)
+#define AT91_MC_ABTTYP_DATAREAD (0 << 10)
+#define AT91_MC_ABTTYP_DATAWRITE (1 << 10)
+#define AT91_MC_ABTTYP_FETCH (2 << 10)
+#define AT91_MC_MST(n) BIT(16 + (n))
+#define AT91_MC_SVMST(n) BIT(24 + (n))
+
+#define AT91_MC_AASR 0x08
+
+#define AT91_MC_MPR 0x0c
+#define AT91_MPR_MSTP(n) GENMASK(2 + ((n) * 4), ((n) * 4))
+
+/* External Bus Interface (EBI) registers */
+#define AT91_MC_EBI_CSA 0x60
+#define AT91_MC_EBI_CS(n) BIT(n)
+#define AT91_MC_EBI_NUM_CS 8
+
+#define AT91_MC_EBI_CFGR 0x64
+#define AT91_MC_EBI_DBPUC BIT(0)
+
+/* Static Memory Controller (SMC) registers */
+#define AT91_MC_SMC_CSR(n) (0x70 + ((n) * 4))
+#define AT91_MC_SMC_NWS GENMASK(6, 0)
+#define AT91_MC_SMC_NWS_(x) ((x) << 0)
+#define AT91_MC_SMC_WSEN BIT(7)
+#define AT91_MC_SMC_TDF GENMASK(11, 8)
+#define AT91_MC_SMC_TDF_(x) ((x) << 8)
+#define AT91_MC_SMC_TDF_MAX 0xf
+#define AT91_MC_SMC_BAT BIT(12)
+#define AT91_MC_SMC_DBW GENMASK(14, 13)
+#define AT91_MC_SMC_DBW_16 (1 << 13)
+#define AT91_MC_SMC_DBW_8 (2 << 13)
+#define AT91_MC_SMC_DPR BIT(15)
+#define AT91_MC_SMC_ACSS GENMASK(17, 16)
+#define AT91_MC_SMC_ACSS_(x) ((x) << 16)
+#define AT91_MC_SMC_ACSS_MAX 3
+#define AT91_MC_SMC_RWSETUP GENMASK(26, 24)
+#define AT91_MC_SMC_RWSETUP_(x) ((x) << 24)
+#define AT91_MC_SMC_RWHOLD GENMASK(30, 28)
+#define AT91_MC_SMC_RWHOLD_(x) ((x) << 28)
+#define AT91_MC_SMC_RWHOLDSETUP_MAX 7
+
+/* SDRAM Controller registers */
+#define AT91_MC_SDRAMC_MR 0x90
+#define AT91_MC_SDRAMC_MODE GENMASK(3, 0)
+#define AT91_MC_SDRAMC_MODE_NORMAL (0 << 0)
+#define AT91_MC_SDRAMC_MODE_NOP (1 << 0)
+#define AT91_MC_SDRAMC_MODE_PRECHARGE (2 << 0)
+#define AT91_MC_SDRAMC_MODE_LMR (3 << 0)
+#define AT91_MC_SDRAMC_MODE_REFRESH (4 << 0)
+#define AT91_MC_SDRAMC_DBW_16 BIT(4)
+
+#define AT91_MC_SDRAMC_TR 0x94
+#define AT91_MC_SDRAMC_COUNT GENMASK(11, 0)
+
+#define AT91_MC_SDRAMC_CR 0x98
+#define AT91_MC_SDRAMC_NC GENMASK(1, 0)
+#define AT91_MC_SDRAMC_NC_8 (0 << 0)
+#define AT91_MC_SDRAMC_NC_9 (1 << 0)
+#define AT91_MC_SDRAMC_NC_10 (2 << 0)
+#define AT91_MC_SDRAMC_NC_11 (3 << 0)
+#define AT91_MC_SDRAMC_NR GENMASK(3, 2)
+#define AT91_MC_SDRAMC_NR_11 (0 << 2)
+#define AT91_MC_SDRAMC_NR_12 (1 << 2)
+#define AT91_MC_SDRAMC_NR_13 (2 << 2)
+#define AT91_MC_SDRAMC_NB BIT(4)
+#define AT91_MC_SDRAMC_NB_2 (0 << 4)
+#define AT91_MC_SDRAMC_NB_4 (1 << 4)
+#define AT91_MC_SDRAMC_CAS GENMASK(6, 5)
+#define AT91_MC_SDRAMC_CAS_2 (2 << 5)
+#define AT91_MC_SDRAMC_TWR GENMASK(10, 7)
+#define AT91_MC_SDRAMC_TRC GENMASK(14, 11)
+#define AT91_MC_SDRAMC_TRP GENMASK(18, 15)
+#define AT91_MC_SDRAMC_TRCD GENMASK(22, 19)
+#define AT91_MC_SDRAMC_TRAS GENMASK(26, 23)
+#define AT91_MC_SDRAMC_TXSR GENMASK(30, 27)
+
+#define AT91_MC_SDRAMC_SRR 0x9c
+#define AT91_MC_SDRAMC_SRCB BIT(0)
+
+#define AT91_MC_SDRAMC_LPR 0xa0
+#define AT91_MC_SDRAMC_LPCB BIT(0)
+
+#define AT91_MC_SDRAMC_IER 0xa4
+#define AT91_MC_SDRAMC_IDR 0xa8
+#define AT91_MC_SDRAMC_IMR 0xac
+#define AT91_MC_SDRAMC_ISR 0xb0
+#define AT91_MC_SDRAMC_RES BIT(0)
+
+/* Burst Flash Controller register */
+#define AT91_MC_BFC_MR 0xc0
+#define AT91_MC_BFC_BFCOM GENMASK(1, 0)
+#define AT91_MC_BFC_BFCOM_DISABLED (0 << 0)
+#define AT91_MC_BFC_BFCOM_ASYNC (1 << 0)
+#define AT91_MC_BFC_BFCOM_BURST (2 << 0)
+#define AT91_MC_BFC_BFCC GENMASK(3, 2)
+#define AT91_MC_BFC_BFCC_MCK (1 << 2)
+#define AT91_MC_BFC_BFCC_DIV2 (2 << 2)
+#define AT91_MC_BFC_BFCC_DIV4 (3 << 2)
+#define AT91_MC_BFC_AVL GENMASK(7, 4)
+#define AT91_MC_BFC_PAGES GENMASK(10, 8)
+#define AT91_MC_BFC_PAGES_NO_PAGE (0 << 8)
+#define AT91_MC_BFC_PAGES_16 (1 << 8)
+#define AT91_MC_BFC_PAGES_32 (2 << 8)
+#define AT91_MC_BFC_PAGES_64 (3 << 8)
+#define AT91_MC_BFC_PAGES_128 (4 << 8)
+#define AT91_MC_BFC_PAGES_256 (5 << 8)
+#define AT91_MC_BFC_PAGES_512 (6 << 8)
+#define AT91_MC_BFC_PAGES_1024 (7 << 8)
+#define AT91_MC_BFC_OEL GENMASK(13, 12)
+#define AT91_MC_BFC_BAAEN BIT(16)
+#define AT91_MC_BFC_BFOEH BIT(17)
+#define AT91_MC_BFC_MUXEN BIT(18)
+#define AT91_MC_BFC_RDYEN BIT(19)
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_MC_H_ */
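The SMC chip-select registers are indexed, with AT91_MC_SMC_CSR(n) expanding to 0x70 + 4 * n (so CS3 sits at 0x7c). A minimal sketch of programming one chip select through the syscon regmap; the regmap handle, helper name and timing values are assumptions, not part of the patch:

#include <linux/mfd/syscon/atmel-mc.h>
#include <linux/regmap.h>

/* Hypothetical: CS3 with wait states enabled, 4 wait states,
 * 2 data-float cycles and a 16-bit data bus.
 */
static int example_setup_cs3(struct regmap *mc)
{
	return regmap_write(mc, AT91_MC_SMC_CSR(3),
			    AT91_MC_SMC_WSEN | AT91_MC_SMC_NWS_(4) |
			    AT91_MC_SMC_TDF_(2) | AT91_MC_SMC_DBW_16);
}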
diff --git a/kernel/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/kernel/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d16f4c82c..558a485d0 100644
--- a/kernel/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/kernel/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -435,4 +435,12 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+/* For imx6ul iomux gpr register field definitions */
+#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
+#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
+#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/kernel/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/kernel/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
new file mode 100644
index 000000000..4585d6105
--- /dev/null
+++ b/kernel/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IMX7_IOMUXC_GPR_H
+#define __LINUX_IMX7_IOMUXC_GPR_H
+
+#define IOMUXC_GPR0 0x00
+#define IOMUXC_GPR1 0x04
+#define IOMUXC_GPR2 0x08
+#define IOMUXC_GPR3 0x0c
+#define IOMUXC_GPR4 0x10
+#define IOMUXC_GPR5 0x14
+#define IOMUXC_GPR6 0x18
+#define IOMUXC_GPR7 0x1c
+#define IOMUXC_GPR8 0x20
+#define IOMUXC_GPR9 0x24
+#define IOMUXC_GPR10 0x28
+#define IOMUXC_GPR11 0x2c
+#define IOMUXC_GPR12 0x30
+#define IOMUXC_GPR13 0x34
+#define IOMUXC_GPR14 0x38
+#define IOMUXC_GPR15 0x3c
+#define IOMUXC_GPR16 0x40
+#define IOMUXC_GPR17 0x44
+#define IOMUXC_GPR18 0x48
+#define IOMUXC_GPR19 0x4c
+#define IOMUXC_GPR20 0x50
+#define IOMUXC_GPR21 0x54
+#define IOMUXC_GPR22 0x58
+
+/* For imx7d iomux gpr register field definitions */
+#define IMX7D_GPR1_IRQ_MASK (0x1 << 12)
+#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK (0x1 << 13)
+#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK (0x1 << 14)
+#define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13)
+#define IMX7D_GPR1_ENET1_CLK_DIR_MASK (0x1 << 17)
+#define IMX7D_GPR1_ENET2_CLK_DIR_MASK (0x1 << 18)
+#define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17)
+
+#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4)
+
+#endif /* __LINUX_IMX7_IOMUXC_GPR_H */
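These GPR words live behind the iomuxc-gpr syscon, so drivers reach them via regmap. A minimal sketch of clearing the ENET1 TX clock select bit; the compatible string and the function name are assumptions:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/regmap.h>

static int example_select_enet1_tx_clk(void)
{
	struct regmap *gpr;

	/* Assumed compatible string for the imx7d GPR syscon node. */
	gpr = syscon_regmap_lookup_by_compatible("fsl,imx7d-iomuxc-gpr");
	if (IS_ERR(gpr))
		return PTR_ERR(gpr);

	/* Clear the ENET1 TX clock select bit (bit 13). */
	return regmap_update_bits(gpr, IOMUXC_GPR1,
				  IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK, 0);
}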
diff --git a/kernel/include/linux/mfd/tps6105x.h b/kernel/include/linux/mfd/tps6105x.h
index 386743dd9..8bc511808 100644
--- a/kernel/include/linux/mfd/tps6105x.h
+++ b/kernel/include/linux/mfd/tps6105x.h
@@ -10,6 +10,7 @@
#define MFD_TPS6105X_H
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/regulator/machine.h>
/*
@@ -82,20 +83,15 @@ struct tps6105x_platform_data {
/**
* struct tps6105x - state holder for the TPS6105x drivers
- * @mutex: mutex to serialize I2C accesses
* @i2c_client: corresponding I2C client
* @regulator: regulator device if used in voltage mode
+ * @regmap: used for i2c communication when accessing registers
*/
struct tps6105x {
struct tps6105x_platform_data *pdata;
- struct mutex lock;
struct i2c_client *client;
struct regulator_dev *regulator;
+ struct regmap *regmap;
};
-extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value);
-extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf);
-extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg,
- u8 bitmask, u8 bitvalues);
-
#endif
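With the tps6105x_set/get/mask_and_set trio removed, callers touch registers through the new regmap member directly. A minimal sketch of what replaces tps6105x_mask_and_set(); the helper name is an assumption:

#include <linux/mfd/tps6105x.h>
#include <linux/regmap.h>

/* Illustrative stand-in for the removed tps6105x_mask_and_set(). */
static int example_tps6105x_update(struct tps6105x *tps, unsigned int reg,
				   unsigned int mask, unsigned int val)
{
	return regmap_update_bits(tps->regmap, reg, mask, val);
}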
diff --git a/kernel/include/linux/mic_bus.h b/kernel/include/linux/mic_bus.h
index d5b5f76d5..27d7c95fd 100644
--- a/kernel/include/linux/mic_bus.h
+++ b/kernel/include/linux/mic_bus.h
@@ -91,7 +91,8 @@ struct mbus_hw_ops {
struct mbus_device *
mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, void __iomem *mmio_va);
+ struct mbus_hw_ops *hw_ops, int index,
+ void __iomem *mmio_va);
void mbus_unregister_device(struct mbus_device *mbdev);
int mbus_register_driver(struct mbus_driver *drv);
diff --git a/kernel/include/linux/microchipphy.h b/kernel/include/linux/microchipphy.h
new file mode 100644
index 000000000..eb492d47f
--- /dev/null
+++ b/kernel/include/linux/microchipphy.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MICROCHIPPHY_H
+#define _MICROCHIPPHY_H
+
+#define LAN88XX_INT_MASK (0x19)
+#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
+#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
+#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_MASK_RX_ER_ (0x0001)
+
+#define LAN88XX_INT_STS (0x1A)
+#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
+#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_STS_RESERVED_ (0x0010)
+#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_STS_RX_ER_ (0x0001)
+
+#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
+#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
+#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
+#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
+
+/* Extended Register Page 1 space */
+#define LAN88XX_EXT_MODE_CTRL (0x13)
+#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
+#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
+#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
+#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
+
+/* MMD 3 Registers */
+#define LAN88XX_MMD3_CHIP_ID (32877)
+#define LAN88XX_MMD3_CHIP_REV (32878)
+
+#endif /* _MICROCHIPPHY_H */
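A minimal phylib sketch of how these mask bits would typically be used from a PHY driver's interrupt-configuration hook; the function name and the choice of bits are illustrative:

#include <linux/microchipphy.h>
#include <linux/phy.h>

/* Enable the interrupt pin and link-change events on a LAN88xx PHY. */
static int example_lan88xx_config_intr(struct phy_device *phydev)
{
	return phy_write(phydev, LAN88XX_INT_MASK,
			 LAN88XX_INT_MASK_MDINTPIN_EN_ |
			 LAN88XX_INT_MASK_LINK_CHANGE_);
}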
diff --git a/kernel/include/linux/miscdevice.h b/kernel/include/linux/miscdevice.h
index 819077c32..543037465 100644
--- a/kernel/include/linux/miscdevice.h
+++ b/kernel/include/linux/miscdevice.h
@@ -49,6 +49,7 @@
#define LOOP_CTRL_MINOR 237
#define VHOST_NET_MINOR 238
#define UHID_MINOR 239
+#define USERIO_MINOR 240
#define MISC_DYNAMIC_MINOR 255
struct device;
@@ -67,7 +68,7 @@ struct miscdevice {
};
extern int misc_register(struct miscdevice *misc);
-extern int misc_deregister(struct miscdevice *misc);
+extern void misc_deregister(struct miscdevice *misc);
#define MODULE_ALIAS_MISCDEV(minor) \
MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
diff --git a/kernel/include/linux/mlx4/cmd.h b/kernel/include/linux/mlx4/cmd.h
index f62e7cf22..58391f2e0 100644
--- a/kernel/include/linux/mlx4/cmd.h
+++ b/kernel/include/linux/mlx4/cmd.h
@@ -35,6 +35,8 @@
#include <linux/dma-mapping.h>
#include <linux/if_link.h>
+#include <linux/mlx4/device.h>
+#include <linux/netdevice.h>
enum {
/* initialization and general commands */
@@ -300,6 +302,10 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+ struct mlx4_counter *counter_stats, int reset);
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+ struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
diff --git a/kernel/include/linux/mlx4/cq.h b/kernel/include/linux/mlx4/cq.h
index e7ecc12a1..09cebe528 100644
--- a/kernel/include/linux/mlx4/cq.h
+++ b/kernel/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
enum {
MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
- MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
MLX4_CQE_L2_TUNNEL = 1 << 27,
MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/kernel/include/linux/mlx4/device.h b/kernel/include/linux/mlx4/device.h
index 83e80ab94..d3133be12 100644
--- a/kernel/include/linux/mlx4/device.h
+++ b/kernel/include/linux/mlx4/device.h
@@ -46,8 +46,9 @@
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
-#define MSIX_LEGACY_SZ 4
#define MIN_MSIX_P_PORT 5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+ (dev_cap).num_ports * MIN_MSIX_P_PORT)
#define MLX4_MAX_100M_UNITS_VAL 255 /*
* work around: can't set values
@@ -78,7 +79,8 @@ enum {
enum {
MLX4_MAX_PORTS = 2,
- MLX4_MAX_PORT_PKEYS = 128
+ MLX4_MAX_PORT_PKEYS = 128,
+ MLX4_MAX_PORT_GIDS = 128
};
/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
@@ -210,6 +212,10 @@ enum {
MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
+ MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
+ MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
};
enum {
@@ -421,6 +427,17 @@ enum {
};
enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
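The arithmetic: 512 bytes minus the 16-byte ctrl segment and the 16-byte RDMA segment leaves 480 bytes, i.e. 30 scatter elements of 16 bytes each. A compile-time restatement, illustrative only:

#include <linux/bug.h>

static inline void example_check_max_sge_rd(void)
{
	/* (512 - 16 - 16) / 16 == 30 */
	BUILD_BUG_ON(MLX4_MAX_SGE_RD != 30);
}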
+
+enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
@@ -528,7 +545,6 @@ struct mlx4_caps {
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
- int comp_pool;
int num_mpts;
int max_fmr_maps;
int num_mtts;
@@ -581,6 +597,7 @@ struct mlx4_caps {
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 phv_bit[MLX4_MAX_PORTS + 1];
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
@@ -771,6 +788,14 @@ union mlx4_ext_av {
struct mlx4_eth_av eth;
};
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do { \
+ if ((value) > U32_MAX) \
+ counter = cpu_to_be32(U32_MAX); \
+ else \
+ counter = cpu_to_be32(value); \
+} while (0)
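A minimal sketch of the intended use, clamping a 64-bit hardware count into a big-endian 32-bit counter field; the helper name is an assumption:

static inline __be32 example_pack_counter(u64 hw_count)
{
	__be32 packed;

	/* Saturates at U32_MAX instead of silently wrapping. */
	ASSIGN_32BIT_COUNTER(packed, hw_count);
	return packed;
}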
+
struct mlx4_counter {
u8 reserved1[3];
u8 counter_mode;
@@ -821,6 +846,7 @@ struct mlx4_dev {
struct mlx4_quotas quotas;
struct radix_tree_root qp_table_tree;
u8 rev_id;
+ u8 port_random_macs;
char board_id[MLX4_BOARD_ID_LEN];
int numa_node;
int oper_log_mgm_entry_size;
@@ -829,6 +855,12 @@ struct mlx4_dev {
struct mlx4_vf_dev *dev_vfs;
};
+struct mlx4_clock_params {
+ u64 offset;
+ u8 bar;
+ u8 size;
+};
+
struct mlx4_eqe {
u8 reserved1;
u8 type;
@@ -957,6 +989,7 @@ struct mlx4_mad_ifc {
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
#define MLX4_INVALID_SLAVE_ID 0xFF
+#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
void handle_port_mgmt_change_event(struct work_struct *work);
@@ -1317,6 +1350,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -1332,10 +1367,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
@@ -1344,6 +1382,7 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
int port);
@@ -1485,4 +1524,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg);
+int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ struct mlx4_clock_params *params);
+
#endif /* MLX4_DEVICE_H */
diff --git a/kernel/include/linux/mlx4/driver.h b/kernel/include/linux/mlx4/driver.h
index 9553a73d2..5a06d9693 100644
--- a/kernel/include/linux/mlx4/driver.h
+++ b/kernel/include/linux/mlx4/driver.h
@@ -59,6 +59,7 @@ struct mlx4_interface {
void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, unsigned long param);
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
+ void (*activate)(struct mlx4_dev *dev, void *context);
struct list_head list;
enum mlx4_protocol protocol;
int flags;
diff --git a/kernel/include/linux/mlx4/qp.h b/kernel/include/linux/mlx4/qp.h
index 6fed539e5..fe052e234 100644
--- a/kernel/include/linux/mlx4/qp.h
+++ b/kernel/include/linux/mlx4/qp.h
@@ -135,7 +135,10 @@ struct mlx4_rss_context {
struct mlx4_qp_path {
u8 fl;
- u8 vlan_control;
+ union {
+ u8 vlan_control;
+ u8 control;
+ };
u8 disable_pkey_check;
u8 pkey_index;
u8 counter_index;
@@ -156,9 +159,16 @@ struct mlx4_qp_path {
};
enum { /* fl */
- MLX4_FL_CV = 1 << 6,
- MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2
+ MLX4_FL_CV = 1 << 6,
+ MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
+ MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
+ MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
};
+
+enum { /* control */
+ MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
+};
+
enum { /* vlan_control */
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */
@@ -254,6 +264,8 @@ enum {
MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32,
MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32,
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
};
enum { /* param3 */
@@ -272,7 +284,8 @@ enum {
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
- MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
@@ -435,11 +448,13 @@ enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_VSD = 1 << 1,
MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
- MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
+ MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1
};
enum mlx4_update_qp_params_flags {
- MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
+ MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0,
+ MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1,
};
struct mlx4_update_qp_params {
diff --git a/kernel/include/linux/mlx5/cq.h b/kernel/include/linux/mlx5/cq.h
index 2695ced22..b2c9fada8 100644
--- a/kernel/include/linux/mlx5/cq.h
+++ b/kernel/include/linux/mlx5/cq.h
@@ -45,7 +45,7 @@ struct mlx5_core_cq {
atomic_t refcount;
struct completion free;
unsigned vector;
- int irqn;
+ unsigned int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
struct mlx5_uar *uar;
@@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq, u16 cq_period,
+ u16 cq_max_count);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
diff --git a/kernel/include/linux/mlx5/device.h b/kernel/include/linux/mlx5/device.h
index abf65c790..0b473cbfa 100644
--- a/kernel/include/linux/mlx5/device.h
+++ b/kernel/include/linux/mlx5/device.h
@@ -35,6 +35,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
@@ -70,6 +73,14 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
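MLX5_SET_TO_ONES() forces every bit of a field to one, which is how match-criteria masks get built for flow-table entries. A sketch assuming the usual fte_match_param layout with an outer_headers member (not shown in this hunk):

static void example_build_smac_mask(u32 *match_criteria)
{
	void *headers = MLX5_ADDR_OF(fte_match_param, match_criteria,
				     outer_headers);

	/* Match on the full source MAC by setting all mask bits. */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers, smac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers, smac_15_0);
}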
+
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+ u64 ___t = MLX5_GET64(typ, p, fld); \
+ pr_debug(#fld " = 0x%llx\n", ___t); \
+ ___t; \
+})
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -115,6 +132,10 @@ enum {
};
enum {
+ MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
MLX5_MIN_PKEY_TABLE_SIZE = 128,
MLX5_MAX_LOG_PKEY_TABLE = 5,
};
@@ -264,6 +285,7 @@ enum {
MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
MLX5_OPCODE_SEND = 0x0a,
MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_LSO = 0x0e,
MLX5_OPCODE_RDMA_READ = 0x10,
MLX5_OPCODE_ATOMIC_CS = 0x11,
MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +334,6 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
-enum {
- HCA_CAP_OPMOD_GET_MAX = 0,
- HCA_CAP_OPMOD_GET_CUR = 1,
- HCA_CAP_OPMOD_GET_ODP_MAX = 4,
- HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -414,7 +429,7 @@ struct health_buffer {
__be32 rsvd2;
u8 irisc_index;
u8 synd;
- __be16 ext_sync;
+ __be16 ext_synd;
};
struct mlx5_init_seg {
@@ -424,7 +439,8 @@ struct mlx5_init_seg {
__be32 cmdq_addr_h;
__be32 cmdq_addr_l_sz;
__be32 cmd_dbell;
- __be32 rsvd1[121];
+ __be32 rsvd1[120];
+ __be32 initializing;
struct health_buffer health;
__be32 rsvd2[884];
__be32 health_counter;
@@ -541,6 +557,10 @@ struct mlx5_cmd_prot_block {
u8 sig;
};
+enum {
+ MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
struct mlx5_err_cqe {
u8 rsvd0[32];
__be32 srqn;
@@ -554,13 +574,22 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[17];
+ u8 rsvd0[4];
+ u8 lro_tcppsh_abort_dupack;
+ u8 lro_min_ttl;
+ __be16 lro_tcp_win;
+ __be32 lro_ack_seq_num;
+ __be32 rss_hash_result;
+ u8 rss_hash_type;
u8 ml_path;
- u8 rsvd20[4];
+ u8 rsvd20[2];
+ __be16 check_sum;
__be16 slid;
__be32 flags_rqpn;
- u8 rsvd28[4];
- __be32 srqn;
+ u8 hds_ip_ext;
+ u8 l4_hdr_type_etc;
+ __be16 vlan_info;
+ __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
@@ -571,6 +600,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+ return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+ CQE_L4_HDR_TYPE_NONE = 0x0,
+ CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
+ CQE_L4_HDR_TYPE_UDP = 0x2,
+ CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
+ CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
+};
+
+enum {
+ CQE_RSS_HTYPE_IP = 0x3 << 6,
+ CQE_RSS_HTYPE_L4 = 0x3 << 2,
+};
+
+enum {
+ CQE_L2_OK = 1 << 0,
+ CQE_L3_OK = 1 << 1,
+ CQE_L4_OK = 1 << 2,
+};
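Taken together, the re-laid-out CQE and its helpers let a receive path classify a completion in a couple of reads. An illustrative sketch:

static bool example_cqe_is_tcp(struct mlx5_cqe64 *cqe)
{
	u8 l4 = get_cqe_l4_hdr_type(cqe);

	return l4 == CQE_L4_HDR_TYPE_TCP_NO_ACK ||
	       l4 == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
	       l4 == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA;
}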
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -996,4 +1059,145 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+ VPORT_STATE_DOWN = 0x0,
+ VPORT_STATE_UP = 0x1,
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
+ MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
+ MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
+ MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
+};
+
+enum {
+ MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+ MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+ MLX5_MATCH_INNER_HEADERS = 1 << 2,
+
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
+};
+
+enum {
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum mlx5_cap_type {
+ MLX5_CAP_GENERAL = 0,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ /* NUM OF CAP Types */
+ MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
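These accessors read straight out of the per-type hca_caps_cur/hca_caps_max arrays that replace the old struct mlx5_caps. A minimal sketch combining two of them; the helper name is an assumption:

static bool example_dev_supports_eth_lro(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, eth_net_offloads) &&
	       MLX5_CAP_ETH(mdev, lro_cap);
}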
+
+enum {
+ MLX5_CMD_STAT_OK = 0x0,
+ MLX5_CMD_STAT_INT_ERR = 0x1,
+ MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
+ MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
+ MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
+ MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
+ MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_LIM_ERR = 0x8,
+ MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
+ MLX5_CMD_STAT_IX_ERR = 0xa,
+ MLX5_CMD_STAT_NO_RES_ERR = 0xf,
+ MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
+ MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
+ MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
+ MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
+ MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
+};
+
+enum {
+ MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
+ MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
+ MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
+ MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
+ MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+ MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+ if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+ return 0;
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
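Worked example: the 0x3 encoding from the MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_* enum yields 128 << 3 = 1024 entries, and anything above MLX5_MAX_LOG_PKEY_TABLE (5) collapses to 0. An illustrative check only:

#include <linux/bug.h>

static void example_pkey_sz(void)
{
	WARN_ON(mlx5_to_sw_pkey_sz(MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES) != 1024);
	WARN_ON(mlx5_to_sw_pkey_sz(6) != 0);
}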
+
#endif /* MLX5_DEVICE_H */
diff --git a/kernel/include/linux/mlx5/driver.h b/kernel/include/linux/mlx5/driver.h
index 9a90e7523..af3efd915 100644
--- a/kernel/include/linux/mlx5/driver.h
+++ b/kernel/include/linux/mlx5/driver.h
@@ -44,7 +44,6 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
};
enum {
- MLX5_MAX_EQ_NAME = 32
+ MLX5_MAX_IRQ_NAME = 32
};
enum {
@@ -104,10 +103,13 @@ enum {
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PFCC = 0x5007,
+ MLX5_REG_PPCNT = 0x5008,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
+ MLX5_REG_PVLC = 0x500f,
MLX5_REG_PMLP = 0, /* TBD */
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +152,11 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_CLIENT_REREG,
};
+enum mlx5_port_status {
+ MLX5_PORT_UP = 1,
+ MLX5_PORT_DOWN = 2,
+};
+
struct mlx5_uuar_info {
struct mlx5_uar *uars;
int num_uars;
@@ -269,56 +276,7 @@ struct mlx5_cmd {
struct mlx5_port_caps {
int gid_table_len;
int pkey_table_len;
-};
-
-struct mlx5_general_caps {
- u8 log_max_eq;
- u8 log_max_cq;
- u8 log_max_qp;
- u8 log_max_mkey;
- u8 log_max_pd;
- u8 log_max_srq;
- u8 log_max_strq;
- u8 log_max_mrw_sz;
- u8 log_max_bsf_list_size;
- u8 log_max_klm_list_size;
- u32 max_cqes;
- int max_wqes;
- u32 max_eqes;
- u32 max_indirection;
- int max_sq_desc_sz;
- int max_rq_desc_sz;
- int max_dc_sq_desc_sz;
- u64 flags;
- u16 stat_rate_support;
- int log_max_msg;
- int num_ports;
- u8 log_max_ra_res_qp;
- u8 log_max_ra_req_qp;
- int max_srq_wqes;
- int bf_reg_size;
- int bf_regs_per_page;
- struct mlx5_port_caps port[MLX5_MAX_PORTS];
- u8 ext_port_cap[MLX5_MAX_PORTS];
- int max_vf;
- u32 reserved_lkey;
- u8 local_ca_ack_delay;
- u8 log_max_mcg;
- u32 max_qp_mcg;
- int min_page_sz;
- int pd_cap;
- u32 max_qp_counters;
- u32 pkey_table_size;
- u8 log_max_ra_req_dc;
- u8 log_max_ra_res_dc;
- u32 uar_sz;
- u8 min_log_pg_sz;
- u8 log_max_xrcd;
- u16 log_uar_page_sz;
-};
-
-struct mlx5_caps {
- struct mlx5_general_caps gen;
+ u8 ext_port_cap;
};
struct mlx5_cmd_mailbox {
@@ -334,8 +292,6 @@ struct mlx5_buf_list {
struct mlx5_buf {
struct mlx5_buf_list direct;
- struct mlx5_buf_list *page_list;
- int nbufs;
int npages;
int size;
u8 page_shift;
@@ -347,11 +303,10 @@ struct mlx5_eq {
u32 cons_index;
struct mlx5_buf buf;
int size;
- u8 irqn;
+ unsigned int irqn;
u8 eqn;
int nent;
u64 mask;
- char name[MLX5_MAX_EQ_NAME];
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
@@ -387,6 +342,8 @@ struct mlx5_core_mr {
enum mlx5_res_type {
MLX5_RES_QP,
+ MLX5_RES_SRQ,
+ MLX5_RES_XSRQ,
};
struct mlx5_core_rsc_common {
@@ -396,6 +353,7 @@ struct mlx5_core_rsc_common {
};
struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
int max_gs;
@@ -414,7 +372,6 @@ struct mlx5_eq_table {
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
- struct msix_entry *msix_arr;
int num_comp_vectors;
/* protect EQs list
*/
@@ -425,7 +382,7 @@ struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
- void __iomem *wc_map;
+ void __iomem *bf_map;
void __iomem *map;
};
@@ -434,9 +391,11 @@ struct mlx5_core_health {
struct health_buffer __iomem *health;
__be32 __iomem *health_counter;
struct timer_list timer;
- struct list_head list;
u32 prev;
int miss_counter;
+ bool sick;
+ struct workqueue_struct *wq;
+ struct work_struct work;
};
struct mlx5_cq_table {
@@ -467,12 +426,21 @@ struct mlx5_mr_table {
struct radix_tree_root tree;
};
+struct mlx5_irq_info {
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
+ struct msix_entry *msix_arr;
+ struct mlx5_irq_info *irq_info;
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+ struct io_mapping *bf_mapping;
+
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -501,6 +469,10 @@ struct mlx5_priv {
/* end: mr staff */
/* start: alloc staff */
+	/* protect buffer allocation according to numa node */
+ struct mutex alloc_mutex;
+ int numa_node;
+
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc staff */
@@ -515,20 +487,45 @@ struct mlx5_priv {
spinlock_t ctx_lock;
};
+enum mlx5_device_state {
+ MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_INTERNAL_ERROR,
+};
+
+enum mlx5_interface_state {
+ MLX5_INTERFACE_STATE_DOWN,
+ MLX5_INTERFACE_STATE_UP,
+};
+
+enum mlx5_pci_status {
+ MLX5_PCI_STATUS_DISABLED,
+ MLX5_PCI_STATUS_ENABLED,
+};
+
struct mlx5_core_dev {
struct pci_dev *pdev;
+ /* sync pci state */
+ struct mutex pci_status_mutex;
+ enum mlx5_pci_status pci_status;
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_caps caps;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
+ enum mlx5_device_state state;
+ /* sync interface state */
+ struct mutex intf_state_mutex;
+ enum mlx5_interface_state interface_state;
void (*event) (struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
unsigned long param);
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
+ u32 issi;
};
struct mlx5_db {
@@ -549,6 +546,11 @@ enum {
MLX5_COMP_EQ_SIZE = 1024,
};
+enum {
+ MLX5_PTYS_IB = 1 << 0,
+ MLX5_PTYS_EN = 1 << 2,
+};
+
struct mlx5_db_pgdir {
struct list_head list;
DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -584,13 +586,44 @@ struct mlx5_pas {
u8 log_sz;
};
+enum port_state_policy {
+ MLX5_AAA_000
+};
+
+enum phy_port_state {
+ MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+ u32 field_select;
+ bool sm_virt_aware;
+ bool has_smi;
+ bool has_raw;
+ enum port_state_policy policy;
+ enum phy_port_state phys_state;
+ enum ib_port_state vport_state;
+ u8 port_physical_state;
+ u64 sys_image_guid;
+ u64 port_guid;
+ u64 node_guid;
+ u32 cap_mask1;
+ u32 cap_mask1_perm;
+ u32 cap_mask2;
+ u32 cap_mask2_perm;
+ u16 lid;
+ u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+ u8 lmc;
+ u8 subnet_timeout;
+ u16 sm_lid;
+ u8 sm_sl;
+ u16 qkey_violation_counter;
+ u16 pkey_violation_counter;
+ bool grh_required;
+};
+
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
- if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
return buf->direct.buf + offset;
- else
- return buf->page_list[offset >> PAGE_SHIFT].buf +
- (offset & (PAGE_SIZE - 1));
}
extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +687,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,19 +698,23 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-void mlx5_health_cleanup(void);
-void __init mlx5_health_init(void);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen);
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
@@ -696,7 +733,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
@@ -718,14 +755,15 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -734,7 +772,37 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -745,6 +813,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+ int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
@@ -757,6 +827,11 @@ void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
+static inline int fw_initializing(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->initializing) >> 31;
+}
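A sketch of a boot-time wait built on this accessor, polling until firmware clears bit 31 of the new initializing word; the helper name and timeout handling are assumptions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_ms);

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end))
			return -EBUSY;
		msleep(1);
	}
	return 0;
}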
+
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
return mkey >> 8;
@@ -799,6 +874,7 @@ struct mlx5_interface {
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
struct mlx5_profile {
u64 mask;
@@ -809,4 +885,18 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+ if (param > 4) {
+ pr_warn("gid table length is zero\n");
+ return 0;
+ }
+
+ return 8 * (1 << param);
+}
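Cross-check: encodings 0x0..0x4 map to 8, 16, 32, 64 and 128 GIDs, matching the MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_* values in mlx5_ifc.h, while larger encodings are rejected. Illustrative only:

#include <linux/bug.h>

static void example_gid_len(void)
{
	/* 8 * (1 << 4) == 128 */
	WARN_ON(mlx5_get_gid_table_len(MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES) != 128);
}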
+
+enum {
+ MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+};
+
#endif /* MLX5_DRIVER_H */
diff --git a/kernel/include/linux/mlx5/flow_table.h b/kernel/include/linux/mlx5/flow_table.h
new file mode 100644
index 000000000..5f922c6d4
--- /dev/null
+++ b/kernel/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+ u8 log_sz;
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
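An illustrative-only lifecycle of the new API: create a table with one group, add an entry, delete it, destroy the table. The dev pointer and the criteria/context buffers are assumptions:

static int example_flow_table(struct mlx5_core_dev *dev,
			      void *match_criteria, void *flow_context)
{
	struct mlx5_flow_table_group group = {
		.log_sz = 10,
		.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS,
	};
	void *ft;
	u32 flow_index;
	int err;

	ft = mlx5_create_flow_table(dev, 0, MLX5_FLOW_TABLE_TYPE_NIC_RCV,
				    1, &group);
	if (!ft)
		return -ENOMEM;

	err = mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
					match_criteria, flow_context,
					&flow_index);
	if (!err)
		mlx5_del_flow_table_entry(ft, flow_index);

	mlx5_destroy_flow_table(ft);
	return err;
}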
diff --git a/kernel/include/linux/mlx5/mlx5_ifc.h b/kernel/include/linux/mlx5/mlx5_ifc.h
index cb3ad17ed..1565324eb 100644
--- a/kernel/include/linux/mlx5/mlx5_ifc.h
+++ b/kernel/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- */
-
+*/
#ifndef MLX5_IFC_H
#define MLX5_IFC_H
enum {
+ MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
+ MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2,
+ MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3,
+ MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13,
+ MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14,
+ MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c,
+ MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d,
+ MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4,
+ MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7,
+ MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc,
+ MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10,
+ MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11,
+ MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12,
+ MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8,
+ MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9,
+ MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+ MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+ MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
+ MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
+ MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
+ MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
+};
+
+enum {
+ MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
+ MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
+ MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
+ MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -43,6 +76,8 @@ enum {
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_QUERY_ISSI = 0x10a,
+ MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -66,6 +101,7 @@ enum {
MLX5_CMD_OP_2ERR_QP = 0x507,
MLX5_CMD_OP_2RST_QP = 0x50a,
MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
MLX5_CMD_OP_CREATE_PSV = 0x600,
MLX5_CMD_OP_DESTROY_PSV = 0x601,
@@ -73,7 +109,10 @@ enum {
MLX5_CMD_OP_DESTROY_SRQ = 0x701,
MLX5_CMD_OP_QUERY_SRQ = 0x702,
MLX5_CMD_OP_ARM_RQ = 0x703,
- MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
+ MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
+ MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
MLX5_CMD_OP_CREATE_DCT = 0x710,
MLX5_CMD_OP_DESTROY_DCT = 0x711,
MLX5_CMD_OP_DRAIN_DCT = 0x712,
@@ -85,8 +124,12 @@ enum {
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
- MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -98,7 +141,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -106,23 +149,22 @@ enum {
MLX5_CMD_OP_NOP = 0x80d,
MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
- MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
- MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
- MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
- MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
- MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
- MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
- MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
- MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
+ MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
+ MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
MLX5_CMD_OP_QUERY_TIR = 0x903,
- MLX5_CMD_OP_CREATE_TIS = 0x912,
- MLX5_CMD_OP_MODIFY_TIS = 0x913,
- MLX5_CMD_OP_DESTROY_TIS = 0x914,
- MLX5_CMD_OP_QUERY_TIS = 0x915,
MLX5_CMD_OP_CREATE_SQ = 0x904,
MLX5_CMD_OP_MODIFY_SQ = 0x905,
MLX5_CMD_OP_DESTROY_SQ = 0x906,
@@ -135,9 +177,432 @@ enum {
MLX5_CMD_OP_MODIFY_RMP = 0x90d,
MLX5_CMD_OP_DESTROY_RMP = 0x90e,
MLX5_CMD_OP_QUERY_RMP = 0x90f,
- MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
- MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
- MLX5_CMD_OP_MAX = 0x911
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_MODIFY_RQT = 0x917,
+ MLX5_CMD_OP_DESTROY_RQT = 0x918,
+ MLX5_CMD_OP_QUERY_RQT = 0x919,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
+ MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
+ MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+ u8 outer_dmac[0x1];
+ u8 outer_smac[0x1];
+ u8 outer_ether_type[0x1];
+ u8 reserved_0[0x1];
+ u8 outer_first_prio[0x1];
+ u8 outer_first_cfi[0x1];
+ u8 outer_first_vid[0x1];
+ u8 reserved_1[0x1];
+ u8 outer_second_prio[0x1];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0x1];
+ u8 reserved_2[0x1];
+ u8 outer_sip[0x1];
+ u8 outer_dip[0x1];
+ u8 outer_frag[0x1];
+ u8 outer_ip_protocol[0x1];
+ u8 outer_ip_ecn[0x1];
+ u8 outer_ip_dscp[0x1];
+ u8 outer_udp_sport[0x1];
+ u8 outer_udp_dport[0x1];
+ u8 outer_tcp_sport[0x1];
+ u8 outer_tcp_dport[0x1];
+ u8 outer_tcp_flags[0x1];
+ u8 outer_gre_protocol[0x1];
+ u8 outer_gre_key[0x1];
+ u8 outer_vxlan_vni[0x1];
+ u8 reserved_3[0x5];
+ u8 source_eswitch_port[0x1];
+
+ u8 inner_dmac[0x1];
+ u8 inner_smac[0x1];
+ u8 inner_ether_type[0x1];
+ u8 reserved_4[0x1];
+ u8 inner_first_prio[0x1];
+ u8 inner_first_cfi[0x1];
+ u8 inner_first_vid[0x1];
+ u8 reserved_5[0x1];
+ u8 inner_second_prio[0x1];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0x1];
+ u8 reserved_6[0x1];
+ u8 inner_sip[0x1];
+ u8 inner_dip[0x1];
+ u8 inner_frag[0x1];
+ u8 inner_ip_protocol[0x1];
+ u8 inner_ip_ecn[0x1];
+ u8 inner_ip_dscp[0x1];
+ u8 inner_udp_sport[0x1];
+ u8 inner_udp_dport[0x1];
+ u8 inner_tcp_sport[0x1];
+ u8 inner_tcp_dport[0x1];
+ u8 inner_tcp_flags[0x1];
+ u8 reserved_7[0x9];
+
+ u8 reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+ u8 ft_support[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x2];
+ u8 log_max_ft_size[0x6];
+ u8 reserved_2[0x10];
+ u8 max_ft_level[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x18];
+ u8 log_max_ft_num[0x8];
+
+ u8 reserved_5[0x18];
+ u8 log_max_destination[0x8];
+
+ u8 reserved_6[0x18];
+ u8 log_max_flow[0x8];
+
+ u8 reserved_7[0x40];
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+ u8 send[0x1];
+ u8 receive[0x1];
+ u8 write[0x1];
+ u8 read[0x1];
+ u8 reserved_0[0x1];
+ u8 srq_receive[0x1];
+ u8 reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 vlan_tag[0x1];
+ u8 reserved_0[0x1];
+ u8 frag[0x1];
+ u8 reserved_1[0x4];
+ u8 tcp_flags[0x9];
+
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+
+ u8 src_ip[4][0x20];
+
+ u8 dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x10];
+ u8 source_port[0x10];
+
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+
+ u8 outer_second_vlan_tag[0x1];
+ u8 inner_second_vlan_tag[0x1];
+ u8 reserved_2[0xe];
+ u8 gre_protocol[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 vxlan_vni[0x18];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+
+ u8 reserved_6[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+
+ u8 reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+ u8 pa_h[0x20];
+
+ u8 pa_l[0x14];
+ u8 reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+ u8 hi[0x20];
+
+ u8 lo[0x20];
+};
+
+enum {
+ MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
+ MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
+ MLX5_ADS_STAT_RATE_10GBPS = 0x8,
+ MLX5_ADS_STAT_RATE_30GBPS = 0x9,
+ MLX5_ADS_STAT_RATE_5GBPS = 0xa,
+ MLX5_ADS_STAT_RATE_20GBPS = 0xb,
+ MLX5_ADS_STAT_RATE_40GBPS = 0xc,
+ MLX5_ADS_STAT_RATE_60GBPS = 0xd,
+ MLX5_ADS_STAT_RATE_80GBPS = 0xe,
+ MLX5_ADS_STAT_RATE_120GBPS = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+ u8 fl[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_0[0xe];
+ u8 pkey_index[0x10];
+
+ u8 reserved_1[0x8];
+ u8 grh[0x1];
+ u8 mlid[0x7];
+ u8 rlid[0x10];
+
+ u8 ack_timeout[0x5];
+ u8 reserved_2[0x3];
+ u8 src_addr_index[0x8];
+ u8 reserved_3[0x4];
+ u8 stat_rate[0x4];
+ u8 hop_limit[0x8];
+
+ u8 reserved_4[0x4];
+ u8 tclass[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+
+ u8 reserved_5[0x4];
+ u8 f_dscp[0x1];
+ u8 f_ecn[0x1];
+ u8 reserved_6[0x1];
+ u8 f_eth_prio[0x1];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+ u8 udp_sport[0x10];
+
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 sl[0x4];
+ u8 port[0x8];
+ u8 rmac_47_32[0x10];
+
+ u8 rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+ u8 reserved_1[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+ u8 reserved_2[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+ u8 reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 reserved_0[0x3];
+ u8 self_lb_en_modifiable[0x1];
+ u8 reserved_1[0x2];
+ u8 max_lso_cap[0x5];
+ u8 reserved_2[0x4];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reserved_3[0x3];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 reserved_4[0x2];
+ u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x10];
+ u8 lro_min_mss_size[0x10];
+
+ u8 reserved_7[0x120];
+
+ u8 lro_timer_supported_periods[4][0x20];
+
+ u8 reserved_8[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+ u8 roce_apm[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+
+ u8 reserved_2[0xc];
+ u8 l3_type[0x4];
+ u8 reserved_3[0x8];
+ u8 roce_version[0x8];
+
+ u8 reserved_4[0x10];
+ u8 r_roce_dest_udp_port[0x10];
+
+ u8 r_roce_max_src_udp_port[0x10];
+ u8 r_roce_min_src_udp_port[0x10];
+
+ u8 reserved_5[0x10];
+ u8 roce_address_table_size[0x10];
+
+ u8 reserved_6[0x700];
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+ u8 reserved_0[0x40];
+
+ u8 atomic_req_endianness[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 atomic_operations[0x10];
+
+ u8 reserved_4[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_5[0x10];
+ u8 atomic_size_dc[0x10];
+
+ u8 reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ u8 reserved_0[0x40];
+
+ u8 sig[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+ u8 reserved_3[0x720];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_STRQ = 0x2,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
+};
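+
+/*
+ * Both table-size enums above are log-scaled: gid_table_size selects
+ * 8 << value GID entries (8..128) and pkey_table_size selects
+ * 128 << value P_Key entries (128..4K).
+ */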
+
+enum {
+ MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
+};
+
+enum {
+ MLX5_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +613,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_1[0xb];
u8 log_max_qp[0x5];
- u8 log_max_strq_sz[0x8];
- u8 reserved_2[0x3];
- u8 log_max_srqs[0x5];
+ u8 reserved_2[0xb];
+ u8 log_max_srq[0x5];
u8 reserved_3[0x10];
u8 reserved_4[0x8];
@@ -185,123 +649,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_15[0x1d];
+ u8 reserved_15[0xd];
+ u8 gid_table_size[0x10];
- u8 reserved_16[0x6];
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 reserved_16[0x4];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
- u8 eswitch_owner[0x1];
- u8 reserved_17[0xa];
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 reserved_17[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 reserved_18[0x4];
u8 local_ca_ack_delay[0x5];
- u8 reserved_18[0x8];
+ u8 reserved_19[0x6];
+ u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_19[0x3];
+ u8 reserved_20[0x3];
u8 log_max_msg[0x5];
- u8 reserved_20[0x18];
+ u8 reserved_21[0x18];
u8 stat_rate_support[0x10];
- u8 reserved_21[0x10];
+ u8 reserved_22[0xc];
+ u8 cqe_version[0x4];
- u8 reserved_22[0x10];
+ u8 compact_address_vector[0x1];
+ u8 reserved_23[0xe];
+ u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_23[0x1];
+ u8 reserved_24[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_24[0x1];
+ u8 reserved_25[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
- u8 dc[0x1];
- u8 reserved_25[0x2];
+ u8 dct[0x1];
+ u8 reserved_26[0x1];
+ u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 rsz_srq[0x1];
+ u8 reserved_27[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 sniffer_rule_flow[0x1];
- u8 sniffer_rule_vport[0x1];
- u8 sniffer_rule_phy[0x1];
- u8 reserved_26[0x1];
+ u8 reserved_28[0x3];
+ u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_27[0x3];
+ u8 reserved_29[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 reserved_30[0x1];
u8 cd[0x1];
- u8 reserved_28[0x1];
+ u8 reserved_31[0x1];
u8 apm[0x1];
- u8 reserved_29[0x7];
+ u8 reserved_32[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_30[0x4];
+ u8 reserved_33[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_31[0xa];
+ u8 reserved_34[0xa];
u8 uar_sz[0x6];
- u8 reserved_32[0x8];
+ u8 reserved_35[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_33[0xa];
+ u8 reserved_36[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_37[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_34[0x10];
+ u8 reserved_38[0x10];
- u8 reserved_35[0x10];
+ u8 reserved_39[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_36[0x10];
+ u8 reserved_40[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_37[0x10];
+ u8 reserved_41[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_38[0x7];
+ u8 reserved_42[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_39[0x18];
+ u8 reserved_43[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_40[0xb];
+ u8 reserved_44[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_45[0x3];
u8 log_max_pd[0x5];
- u8 reserved_41[0xb];
+ u8 reserved_46[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_42[0x20];
+ u8 reserved_47[0x20];
- u8 reserved_43[0x3];
+ u8 reserved_48[0x3];
u8 log_max_rq[0x5];
- u8 reserved_44[0x3];
+ u8 reserved_49[0x3];
u8 log_max_sq[0x5];
- u8 reserved_45[0x3];
+ u8 reserved_50[0x3];
u8 log_max_tir[0x5];
- u8 reserved_46[0x3];
+ u8 reserved_51[0x3];
u8 log_max_tis[0x5];
- u8 reserved_47[0x13];
- u8 log_max_rq_per_tir[0x5];
- u8 reserved_48[0x3];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_52[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_53[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_54[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_55[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_49[0xe0];
+ u8 reserved_56[0x3];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_57[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_58[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_59[0x3];
+ u8 log_min_stride_sz_sq[0x5];
+
+ u8 reserved_60[0x1b];
+ u8 log_max_wq_sz[0x5];
- u8 reserved_50[0x10];
+ u8 reserved_61[0xa0];
+
+ u8 reserved_62[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_63[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_51[0x100];
+ u8 reserved_64[0x100];
- u8 reserved_52[0x1f];
+ u8 reserved_65[0x1f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_53[0x220];
+ u8 reserved_66[0x220];
+};
+
+enum {
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
+};
+
+struct mlx5_ifc_dest_format_struct_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+ u8 reserved_0[0xa00];
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0,
+ MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1,
+};
+
+enum {
+ MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0,
+ MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1,
+};
+
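+/*
+ * Common work-queue context, embedded at the tail of the RQ, SQ and
+ * RMP contexts defined below.  pas[0] is a flexible array of
+ * physical-address entries describing the queue buffer.
+ */
+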
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_0[0x18];
+
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_1[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x8];
+ u8 uar_page[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 hw_counter[0x20];
+
+ u8 sw_counter[0x20];
+
+ u8 reserved_4[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_5[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_6[0x3];
+ u8 log_wq_sz[0x5];
+
+ u8 reserved_7[0x4e0];
+
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_0[0x10];
+ u8 mac_addr_47_32[0x10];
+
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+ u8 reserved_0[0xa0];
+
+ u8 min_time_between_cnps[0x20];
+
+ u8 reserved_1[0x12];
+ u8 cnp_dscp[0x6];
+ u8 reserved_2[0x5];
+ u8 cnp_802p_prio[0x3];
+
+ u8 reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+ u8 reserved_0[0x60];
+
+ u8 reserved_1[0x4];
+ u8 clamp_tgt_rate[0x1];
+ u8 reserved_2[0x3];
+ u8 clamp_tgt_rate_after_time_inc[0x1];
+ u8 reserved_3[0x17];
+
+ u8 reserved_4[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_5[0xe0];
+
+ u8 rate_to_set_on_first_cnp[0x20];
+
+ u8 dce_tcp_g[0x20];
+
+ u8 dce_tcp_rtt[0x20];
+
+ u8 rate_reduce_monitor_period[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 initial_alpha_value[0x20];
+
+ u8 reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+ u8 reserved_0[0x80];
+
+ u8 rppp_max_rps[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_1[0x640];
+};
+
+enum {
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+ u8 resize_field_select[0x20];
+};
+
+enum {
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+ u8 modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+ u8 field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+ u8 field_select_r_roce_rp[0x20];
+};
+
+enum {
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+ u8 field_select_8021qaurp[0x20];
+};
+
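+/*
+ * Counter groups.  Each 64-bit counter below is split into _high/_low
+ * 32-bit halves, following the 32-bit register granularity of these
+ * layouts.
+ */
+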
+struct mlx5_ifc_phys_layer_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 symbol_errors_high[0x20];
+
+ u8 symbol_errors_low[0x20];
+
+ u8 sync_headers_errors_high[0x20];
+
+ u8 sync_headers_errors_low[0x20];
+
+ u8 edpl_bip_errors_lane0_high[0x20];
+
+ u8 edpl_bip_errors_lane0_low[0x20];
+
+ u8 edpl_bip_errors_lane1_high[0x20];
+
+ u8 edpl_bip_errors_lane1_low[0x20];
+
+ u8 edpl_bip_errors_lane2_high[0x20];
+
+ u8 edpl_bip_errors_lane2_low[0x20];
+
+ u8 edpl_bip_errors_lane3_high[0x20];
+
+ u8 edpl_bip_errors_lane3_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+ u8 rs_fec_corrected_blocks_high[0x20];
+
+ u8 rs_fec_corrected_blocks_low[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_high[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_low[0x20];
+
+ u8 rs_fec_no_errors_blocks_high[0x20];
+
+ u8 rs_fec_no_errors_blocks_low[0x20];
+
+ u8 rs_fec_single_error_blocks_high[0x20];
+
+ u8 rs_fec_single_error_blocks_low[0x20];
+
+ u8 rs_fec_corrected_symbols_total_high[0x20];
+
+ u8 rs_fec_corrected_symbols_total_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_low[0x20];
+
+ u8 link_down_events[0x20];
+
+ u8 successful_recovery_events[0x20];
+
+ u8 reserved_0[0x180];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+ u8 transmit_queue_high[0x20];
+
+ u8 transmit_queue_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+ u8 rx_octets_high[0x20];
+
+ u8 rx_octets_low[0x20];
+
+ u8 reserved_0[0xc0];
+
+ u8 rx_frames_high[0x20];
+
+ u8 rx_frames_low[0x20];
+
+ u8 tx_octets_high[0x20];
+
+ u8 tx_octets_low[0x20];
+
+ u8 reserved_1[0xc0];
+
+ u8 tx_frames_high[0x20];
+
+ u8 tx_frames_low[0x20];
+
+ u8 rx_pause_high[0x20];
+
+ u8 rx_pause_low[0x20];
+
+ u8 rx_pause_duration_high[0x20];
+
+ u8 rx_pause_duration_low[0x20];
+
+ u8 tx_pause_high[0x20];
+
+ u8 tx_pause_low[0x20];
+
+ u8 tx_pause_duration_high[0x20];
+
+ u8 tx_pause_duration_low[0x20];
+
+ u8 rx_pause_transition_high[0x20];
+
+ u8 rx_pause_transition_low[0x20];
+
+ u8 reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+ u8 port_transmit_wait_high[0x20];
+
+ u8 port_transmit_wait_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+ u8 dot3stats_alignment_errors_high[0x20];
+
+ u8 dot3stats_alignment_errors_low[0x20];
+
+ u8 dot3stats_fcs_errors_high[0x20];
+
+ u8 dot3stats_fcs_errors_low[0x20];
+
+ u8 dot3stats_single_collision_frames_high[0x20];
+
+ u8 dot3stats_single_collision_frames_low[0x20];
+
+ u8 dot3stats_multiple_collision_frames_high[0x20];
+
+ u8 dot3stats_multiple_collision_frames_low[0x20];
+
+ u8 dot3stats_sqe_test_errors_high[0x20];
+
+ u8 dot3stats_sqe_test_errors_low[0x20];
+
+ u8 dot3stats_deferred_transmissions_high[0x20];
+
+ u8 dot3stats_deferred_transmissions_low[0x20];
+
+ u8 dot3stats_late_collisions_high[0x20];
+
+ u8 dot3stats_late_collisions_low[0x20];
+
+ u8 dot3stats_excessive_collisions_high[0x20];
+
+ u8 dot3stats_excessive_collisions_low[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_low[0x20];
+
+ u8 dot3stats_carrier_sense_errors_high[0x20];
+
+ u8 dot3stats_carrier_sense_errors_low[0x20];
+
+ u8 dot3stats_frame_too_longs_high[0x20];
+
+ u8 dot3stats_frame_too_longs_low[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_low[0x20];
+
+ u8 dot3stats_symbol_errors_high[0x20];
+
+ u8 dot3stats_symbol_errors_low[0x20];
+
+ u8 dot3control_in_unknown_opcodes_high[0x20];
+
+ u8 dot3control_in_unknown_opcodes_low[0x20];
+
+ u8 dot3in_pause_frames_high[0x20];
+
+ u8 dot3in_pause_frames_low[0x20];
+
+ u8 dot3out_pause_frames_high[0x20];
+
+ u8 dot3out_pause_frames_low[0x20];
+
+ u8 reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+ u8 ether_stats_drop_events_high[0x20];
+
+ u8 ether_stats_drop_events_low[0x20];
+
+ u8 ether_stats_octets_high[0x20];
+
+ u8 ether_stats_octets_low[0x20];
+
+ u8 ether_stats_pkts_high[0x20];
+
+ u8 ether_stats_pkts_low[0x20];
+
+ u8 ether_stats_broadcast_pkts_high[0x20];
+
+ u8 ether_stats_broadcast_pkts_low[0x20];
+
+ u8 ether_stats_multicast_pkts_high[0x20];
+
+ u8 ether_stats_multicast_pkts_low[0x20];
+
+ u8 ether_stats_crc_align_errors_high[0x20];
+
+ u8 ether_stats_crc_align_errors_low[0x20];
+
+ u8 ether_stats_undersize_pkts_high[0x20];
+
+ u8 ether_stats_undersize_pkts_low[0x20];
+
+ u8 ether_stats_oversize_pkts_high[0x20];
+
+ u8 ether_stats_oversize_pkts_low[0x20];
+
+ u8 ether_stats_fragments_high[0x20];
+
+ u8 ether_stats_fragments_low[0x20];
+
+ u8 ether_stats_jabbers_high[0x20];
+
+ u8 ether_stats_jabbers_low[0x20];
+
+ u8 ether_stats_collisions_high[0x20];
+
+ u8 ether_stats_collisions_low[0x20];
+
+ u8 ether_stats_pkts64octets_high[0x20];
+
+ u8 ether_stats_pkts64octets_low[0x20];
+
+ u8 ether_stats_pkts65to127octets_high[0x20];
+
+ u8 ether_stats_pkts65to127octets_low[0x20];
+
+ u8 ether_stats_pkts128to255octets_high[0x20];
+
+ u8 ether_stats_pkts128to255octets_low[0x20];
+
+ u8 ether_stats_pkts256to511octets_high[0x20];
+
+ u8 ether_stats_pkts256to511octets_low[0x20];
+
+ u8 ether_stats_pkts512to1023octets_high[0x20];
+
+ u8 ether_stats_pkts512to1023octets_low[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_high[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_low[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_high[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_low[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_high[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_low[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_high[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_low[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_high[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_low[0x20];
+
+ u8 reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+ u8 if_in_octets_high[0x20];
+
+ u8 if_in_octets_low[0x20];
+
+ u8 if_in_ucast_pkts_high[0x20];
+
+ u8 if_in_ucast_pkts_low[0x20];
+
+ u8 if_in_discards_high[0x20];
+
+ u8 if_in_discards_low[0x20];
+
+ u8 if_in_errors_high[0x20];
+
+ u8 if_in_errors_low[0x20];
+
+ u8 if_in_unknown_protos_high[0x20];
+
+ u8 if_in_unknown_protos_low[0x20];
+
+ u8 if_out_octets_high[0x20];
+
+ u8 if_out_octets_low[0x20];
+
+ u8 if_out_ucast_pkts_high[0x20];
+
+ u8 if_out_ucast_pkts_low[0x20];
+
+ u8 if_out_discards_high[0x20];
+
+ u8 if_out_discards_low[0x20];
+
+ u8 if_out_errors_high[0x20];
+
+ u8 if_out_errors_low[0x20];
+
+ u8 if_in_multicast_pkts_high[0x20];
+
+ u8 if_in_multicast_pkts_low[0x20];
+
+ u8 if_in_broadcast_pkts_high[0x20];
+
+ u8 if_in_broadcast_pkts_low[0x20];
+
+ u8 if_out_multicast_pkts_high[0x20];
+
+ u8 if_out_multicast_pkts_low[0x20];
+
+ u8 if_out_broadcast_pkts_high[0x20];
+
+ u8 if_out_broadcast_pkts_low[0x20];
+
+ u8 reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+ u8 a_frames_transmitted_ok_high[0x20];
+
+ u8 a_frames_transmitted_ok_low[0x20];
+
+ u8 a_frames_received_ok_high[0x20];
+
+ u8 a_frames_received_ok_low[0x20];
+
+ u8 a_frame_check_sequence_errors_high[0x20];
+
+ u8 a_frame_check_sequence_errors_low[0x20];
+
+ u8 a_alignment_errors_high[0x20];
+
+ u8 a_alignment_errors_low[0x20];
+
+ u8 a_octets_transmitted_ok_high[0x20];
+
+ u8 a_octets_transmitted_ok_low[0x20];
+
+ u8 a_octets_received_ok_high[0x20];
+
+ u8 a_octets_received_ok_low[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_high[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_low[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_high[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_low[0x20];
+
+ u8 a_multicast_frames_received_ok_high[0x20];
+
+ u8 a_multicast_frames_received_ok_low[0x20];
+
+ u8 a_broadcast_frames_received_ok_high[0x20];
+
+ u8 a_broadcast_frames_received_ok_low[0x20];
+
+ u8 a_in_range_length_errors_high[0x20];
+
+ u8 a_in_range_length_errors_low[0x20];
+
+ u8 a_out_of_range_length_field_high[0x20];
+
+ u8 a_out_of_range_length_field_low[0x20];
+
+ u8 a_frame_too_long_errors_high[0x20];
+
+ u8 a_frame_too_long_errors_low[0x20];
+
+ u8 a_symbol_error_during_carrier_high[0x20];
+
+ u8 a_symbol_error_during_carrier_low[0x20];
+
+ u8 a_mac_control_frames_transmitted_high[0x20];
+
+ u8 a_mac_control_frames_transmitted_low[0x20];
+
+ u8 a_mac_control_frames_received_high[0x20];
+
+ u8 a_mac_control_frames_received_low[0x20];
+
+ u8 a_unsupported_opcodes_received_high[0x20];
+
+ u8 a_unsupported_opcodes_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+ u8 reserved_0[0x300];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+ u8 command_completion_vector[0x20];
+
+ u8 reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+ u8 reserved_0[0x18];
+ u8 port_num[0x1];
+ u8 reserved_1[0x3];
+ u8 vl[0x4];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+ u8 event_subtype[0x8];
+ u8 reserved_0[0x8];
+ u8 congestion_level[0x8];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+ u8 reserved_0[0x60];
+
+ u8 gpio_event_hi[0x20];
+
+ u8 gpio_event_lo[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+ u8 reserved_0[0x40];
+
+ u8 port_num[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+ u8 reserved_0[0xe0];
+};
+
+enum {
+ MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
+ MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+ u8 reserved_0[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 r_key[0x20];
+
+ u8 reserved_0[0x10];
+ u8 packet_len[0x10];
+
+ u8 rdma_op_len[0x20];
+
+ u8 rdma_va[0x40];
+
+ u8 reserved_1[0x5];
+ u8 rdma[0x1];
+ u8 write[0x1];
+ u8 requestor[0x1];
+ u8 qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 reserved_0[0x10];
+ u8 wqe_index[0x10];
+
+ u8 reserved_1[0x10];
+ u8 len[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x5];
+ u8 rdma[0x1];
+ u8 write_read[0x1];
+ u8 requestor[0x1];
+ u8 qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+ u8 reserved_0[0xa0];
+
+ u8 type[0x8];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 cq_number[0x18];
+};
+
+enum {
+ MLX5_QPC_STATE_RST = 0x0,
+ MLX5_QPC_STATE_INIT = 0x1,
+ MLX5_QPC_STATE_RTR = 0x2,
+ MLX5_QPC_STATE_RTS = 0x3,
+ MLX5_QPC_STATE_SQER = 0x4,
+ MLX5_QPC_STATE_ERR = 0x6,
+ MLX5_QPC_STATE_SQD = 0x7,
+ MLX5_QPC_STATE_SUSPENDED = 0x9,
+};
+
+enum {
+ MLX5_QPC_ST_RC = 0x0,
+ MLX5_QPC_ST_UC = 0x1,
+ MLX5_QPC_ST_UD = 0x2,
+ MLX5_QPC_ST_XRC = 0x3,
+ MLX5_QPC_ST_DCI = 0x5,
+ MLX5_QPC_ST_QP0 = 0x7,
+ MLX5_QPC_ST_QP1 = 0x8,
+ MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
+ MLX5_QPC_ST_REG_UMR = 0xc,
+};
+
+enum {
+ MLX5_QPC_PM_STATE_ARMED = 0x0,
+ MLX5_QPC_PM_STATE_REARM = 0x1,
+ MLX5_QPC_PM_STATE_RESERVED = 0x2,
+ MLX5_QPC_PM_STATE_MIGRATED = 0x3,
+};
+
+enum {
+ MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
+ MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
+};
+
+enum {
+ MLX5_QPC_MTU_256_BYTES = 0x1,
+ MLX5_QPC_MTU_512_BYTES = 0x2,
+ MLX5_QPC_MTU_1K_BYTES = 0x3,
+ MLX5_QPC_MTU_2K_BYTES = 0x4,
+ MLX5_QPC_MTU_4K_BYTES = 0x5,
+ MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
+};
+
+enum {
+ MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
+ MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
+};
+
+enum {
+ MLX5_QPC_CS_REQ_DISABLE = 0x0,
+ MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
+ MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
+};
+
+enum {
+ MLX5_QPC_CS_RES_DISABLE = 0x0,
+ MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
+ MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
+};
+
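+/*
+ * QP context.  The same block is carried by the modify-QP commands
+ * below (rst2init_qp, rtr2rts_qp, rts2rts_qp, sqd2rts_qp,
+ * sqerr2rts_qp, ...) together with an opt_param_mask selecting which
+ * optional fields to apply.
+ */
+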
+struct mlx5_ifc_qpc_bits {
+ u8 state[0x4];
+ u8 reserved_0[0x4];
+ u8 st[0x8];
+ u8 reserved_1[0x3];
+ u8 pm_state[0x2];
+ u8 reserved_2[0x7];
+ u8 end_padding_mode[0x2];
+ u8 reserved_3[0x2];
+
+ u8 wq_signature[0x1];
+ u8 block_lb_mc[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 reserved_4[0x1];
+ u8 drain_sigerr[0x1];
+ u8 reserved_5[0x2];
+ u8 pd[0x18];
+
+ u8 mtu[0x3];
+ u8 log_msg_max[0x5];
+ u8 reserved_6[0x1];
+ u8 log_rq_size[0x4];
+ u8 log_rq_stride[0x3];
+ u8 no_sq[0x1];
+ u8 log_sq_size[0x4];
+ u8 reserved_7[0x6];
+ u8 rlky[0x1];
+ u8 reserved_8[0x4];
+
+ u8 counter_set_id[0x8];
+ u8 uar_page[0x18];
+
+ u8 reserved_9[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 remote_qpn[0x18];
+
+ struct mlx5_ifc_ads_bits primary_address_path;
+
+ struct mlx5_ifc_ads_bits secondary_address_path;
+
+ u8 log_ack_req_freq[0x4];
+ u8 reserved_11[0x4];
+ u8 log_sra_max[0x3];
+ u8 reserved_12[0x2];
+ u8 retry_count[0x3];
+ u8 rnr_retry[0x3];
+ u8 reserved_13[0x1];
+ u8 fre[0x1];
+ u8 cur_rnr_retry[0x3];
+ u8 cur_retry_count[0x3];
+ u8 reserved_14[0x5];
+
+ u8 reserved_15[0x20];
+
+ u8 reserved_16[0x8];
+ u8 next_send_psn[0x18];
+
+ u8 reserved_17[0x8];
+ u8 cqn_snd[0x18];
+
+ u8 reserved_18[0x40];
+
+ u8 reserved_19[0x8];
+ u8 last_acked_psn[0x18];
+
+ u8 reserved_20[0x8];
+ u8 ssn[0x18];
+
+ u8 reserved_21[0x8];
+ u8 log_rra_max[0x3];
+ u8 reserved_22[0x1];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 reserved_23[0x1];
+ u8 page_offset[0x6];
+ u8 reserved_24[0x3];
+ u8 cd_slave_receive[0x1];
+ u8 cd_slave_send[0x1];
+ u8 cd_master[0x1];
+
+ u8 reserved_25[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 next_rcv_psn[0x18];
+
+ u8 reserved_26[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_27[0x8];
+ u8 cqn_rcv[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 q_key[0x20];
+
+ u8 reserved_28[0x5];
+ u8 rq_type[0x3];
+ u8 srqn_rmpn[0x18];
+
+ u8 reserved_29[0x8];
+ u8 rmsn[0x18];
+
+ u8 hw_sq_wqebb_counter[0x10];
+ u8 sw_sq_wqebb_counter[0x10];
+
+ u8 hw_rq_counter[0x20];
+
+ u8 sw_rq_counter[0x20];
+
+ u8 reserved_30[0x20];
+
+ u8 reserved_31[0xf];
+ u8 cgs[0x1];
+ u8 cs_req[0x8];
+ u8 cs_res[0x8];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_32[0xc0];
+};
+
+struct mlx5_ifc_roce_addr_layout_bits {
+ u8 source_l3_address[16][0x8];
+
+ u8 reserved_0[0x3];
+ u8 vlan_valid[0x1];
+ u8 vlan_id[0xc];
+ u8 source_mac_47_32[0x10];
+
+ u8 source_mac_31_0[0x20];
+
+ u8 reserved_1[0x14];
+ u8 roce_l3_type[0x4];
+ u8 roce_version[0x8];
+
+ u8 reserved_2[0x20];
+};
+
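+/*
+ * Capability layout returned by QUERY_HCA_CAP and pushed by
+ * SET_HCA_CAP; the command's op_mod selects which union member is
+ * valid.
+ */
+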
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_odp_cap_bits odp_cap;
+ struct mlx5_ifc_atomic_caps_bits atomic_caps;
+ struct mlx5_ifc_roce_cap_bits roce_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+ struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ u8 reserved_0[0x8000];
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+ u8 reserved_0[0x20];
+
+ u8 group_id[0x20];
+
+ u8 reserved_1[0x8];
+ u8 flow_tag[0x18];
+
+ u8 reserved_2[0x10];
+ u8 action[0x10];
+
+ u8 reserved_3[0x8];
+ u8 destination_list_size[0x18];
+
+ u8 reserved_4[0x160];
+
+ struct mlx5_ifc_fte_match_param_bits match_value;
+
+ u8 reserved_5[0x600];
+
+ struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
+
+enum {
+ MLX5_XRC_SRQC_STATE_GOOD = 0x0,
+ MLX5_XRC_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+ u8 state[0x4];
+ u8 log_xrc_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_2[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 user_index_equal_xrc_srqn[0x1];
+ u8 reserved_4[0x1];
+ u8 log_page_size[0x6];
+ u8 user_index[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_7[0x40];
+
+ u8 db_record_addr_h[0x20];
+
+ u8 db_record_addr_l[0x1e];
+ u8 reserved_8[0x2];
+
+ u8 reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 reserved_0[0xc];
+ u8 prio[0x4];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x100];
+
+ u8 reserved_3[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_4[0x3c0];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+};
+
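+/*
+ * TIR context, carrying the RSS state: rx_hash_fn selects the hash
+ * (e.g. Toeplitz with the 40-byte rx_hash_toeplitz_key), and
+ * indirect_table names an RQT when disp_type is INDIRECT.
+ */
+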
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_0[0x20];
+
+ u8 disp_type[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x40];
+
+ u8 reserved_3[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_ip_payload_size[0x8];
+
+ u8 reserved_4[0x40];
+
+ u8 reserved_5[0x8];
+ u8 inline_rqn[0x18];
+
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_6[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_7[0x5];
+ u8 indirect_table[0x18];
+
+ u8 rx_hash_fn[0x4];
+ u8 reserved_8[0x2];
+ u8 self_lb_block[0x2];
+ u8 transport_domain[0x18];
+
+ u8 rx_hash_toeplitz_key[10][0x20];
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+ u8 reserved_9[0x4c0];
+};
+
+enum {
+ MLX5_SRQC_STATE_GOOD = 0x0,
+ MLX5_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+ u8 state[0x4];
+ u8 log_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 reserved_2[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_3[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x2];
+ u8 log_page_size[0x6];
+ u8 reserved_6[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_9[0x40];
+
+ u8 dbr_addr[0x40];
+
+ u8 reserved_10[0x80];
+};
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlky[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0xa0];
+
+ u8 tis_lst_sz[0x10];
+ u8 reserved_5[0x10];
+
+ u8 reserved_6[0x40];
+
+ u8 reserved_7[0x8];
+ u8 tis_num_0[0x18];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_0[0xa0];
+
+ u8 reserved_1[0x10];
+ u8 rqt_max_size[0x10];
+
+ u8 reserved_2[0x10];
+ u8 rqt_actual_size[0x10];
+
+ u8 reserved_3[0x6a0];
+
+ struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlky[0x1];
+ u8 reserved_0[0x2];
+ u8 vsd[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_2[0x12];
+
+ u8 reserved_3[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 reserved_5[0x18];
+
+ u8 reserved_6[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_7[0xe0];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_RMPC_STATE_RDY = 0x1,
+ MLX5_RMPC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+ u8 reserved_0[0x8];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x140];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_0[0x1f];
+ u8 roce_en[0x1];
+
+ u8 reserved_1[0x760];
+
+ u8 reserved_2[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_3[0xc];
+ u8 allowed_list_size[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+ u8 reserved_4[0x20];
+
+ u8 current_uc_mac_address[0][0x40];
+};
+
+enum {
+ MLX5_MKC_ACCESS_MODE_PA = 0x0,
+ MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+ MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_0[0x1];
+ u8 free[0x1];
+ u8 reserved_1[0xd];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_2[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_4[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_5[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_6[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_7[0x1b];
+ u8 log_page_size[0x5];
+
+ u8 reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+ u8 reserved_0[0x10];
+ u8 pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+ u8 array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+ u8 field_select[0x20];
+
+ u8 reserved_0[0xe0];
+
+ u8 sm_virt_aware[0x1];
+ u8 has_smi[0x1];
+ u8 has_raw[0x1];
+ u8 grh_required[0x1];
+ u8 reserved_1[0xc];
+ u8 port_physical_state[0x4];
+ u8 vport_state_policy[0x4];
+ u8 port_state[0x4];
+ u8 vport_state[0x4];
+
+ u8 reserved_2[0x20];
+
+ u8 system_image_guid[0x40];
+
+ u8 port_guid[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 cap_mask1[0x20];
+
+ u8 cap_mask1_field_select[0x20];
+
+ u8 cap_mask2[0x20];
+
+ u8 cap_mask2_field_select[0x20];
+
+ u8 reserved_3[0x80];
+
+ u8 lid[0x10];
+ u8 reserved_4[0x4];
+ u8 init_type_reply[0x4];
+ u8 lmc[0x3];
+ u8 subnet_timeout[0x5];
+
+ u8 sm_lid[0x10];
+ u8 sm_sl[0x4];
+ u8 reserved_5[0xc];
+
+ u8 qkey_violation_counter[0x10];
+ u8 pkey_violation_counter[0x10];
+
+ u8 reserved_6[0xca0];
+};
+
+enum {
+ MLX5_EQC_STATUS_OK = 0x0,
+ MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
+};
+
+enum {
+ MLX5_EQC_ST_ARMED = 0x9,
+ MLX5_EQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x9];
+ u8 ec[0x1];
+ u8 oi[0x1];
+ u8 reserved_1[0x5];
+ u8 st[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_5[0x6];
+
+ u8 reserved_6[0x3];
+ u8 log_eq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x18];
+ u8 intr[0x8];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_10[0x18];
+
+ u8 reserved_11[0x60];
+
+ u8 reserved_12[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_13[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_14[0x80];
+};
+
+enum {
+ MLX5_DCTC_STATE_ACTIVE = 0x0,
+ MLX5_DCTC_STATE_DRAINING = 0x1,
+ MLX5_DCTC_STATE_DRAINED = 0x2,
+};
+
+enum {
+ MLX5_DCTC_CS_RES_DISABLE = 0x0,
+ MLX5_DCTC_CS_RES_NA = 0x1,
+ MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
+};
+
+enum {
+ MLX5_DCTC_MTU_256_BYTES = 0x1,
+ MLX5_DCTC_MTU_512_BYTES = 0x2,
+ MLX5_DCTC_MTU_1K_BYTES = 0x3,
+ MLX5_DCTC_MTU_2K_BYTES = 0x4,
+ MLX5_DCTC_MTU_4K_BYTES = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 rlky[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_4[0xd];
+
+ u8 reserved_5[0x8];
+ u8 cs_res[0x8];
+ u8 reserved_6[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 reserved_7[0x8];
+
+ u8 reserved_8[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_9[0x8];
+ u8 pd[0x18];
+
+ u8 tclass[0x8];
+ u8 reserved_10[0x4];
+ u8 flow_label[0x14];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_11[0x5];
+ u8 mtu[0x3];
+ u8 port[0x8];
+ u8 pkey_index[0x10];
+
+ u8 reserved_12[0x8];
+ u8 my_addr_index[0x8];
+ u8 reserved_13[0x8];
+ u8 hop_limit[0x8];
+
+ u8 dc_access_key_violation_count[0x20];
+
+ u8 reserved_14[0x14];
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+
+ u8 reserved_15[0x40];
+};
+
+enum {
+ MLX5_CQC_STATUS_OK = 0x0,
+ MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
+ MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
+};
+
+enum {
+ MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
+ MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
+};
+
+enum {
+ MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
+ MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
+ MLX5_CQC_ST_FIRED = 0xa,
+};
+
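+/*
+ * CQ context.  cq_period (usecs) and cq_max_count form the event
+ * moderation pair: a completion event is raised when either threshold
+ * is reached first, matching the CQ_PERIOD/CQ_MAX_COUNT bits of
+ * modify_field_select above.
+ */
+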
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x4];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_1[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 reserved_2[0x2];
+ u8 cqe_zip_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_6[0x6];
+
+ u8 reserved_7[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_8[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+
+ u8 reserved_9[0x18];
+ u8 c_eqn[0x8];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_11[0x18];
+
+ u8 reserved_12[0x20];
+
+ u8 reserved_13[0x8];
+ u8 last_notified_index[0x18];
+
+ u8 reserved_14[0x8];
+ u8 last_solicit_index[0x18];
+
+ u8 reserved_15[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_16[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_17[0x40];
+
+ u8 dbr_addr[0x40];
+};
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+ struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ u8 reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 reserved_2[0x10];
+ u8 vsd_vendor_id[0x10];
+
+ u8 vsd[208][0x8];
+
+ u8 vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+ struct mlx5_ifc_modify_field_select_bits modify_field_select;
+ struct mlx5_ifc_resize_field_select_bits resize_field_select;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+ struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+ struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+ struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ u8 reserved_0[0x7c0];
+};
+
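+/*
+ * Event-specific payload of an event queue entry; which member
+ * applies is presumably selected by the EQE event type.
+ */
+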
+union mlx5_ifc_event_auto_bits {
+ struct mlx5_ifc_comp_event_bits comp_event;
+ struct mlx5_ifc_dct_events_bits dct_events;
+ struct mlx5_ifc_qp_events_bits qp_events;
+ struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+ struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+ struct mlx5_ifc_cq_error_bits cq_error;
+ struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+ struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+ struct mlx5_ifc_gpio_event_bits gpio_event;
+ struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+ struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+ struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+ u8 reserved_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+ u8 reserved_0[0x100];
+
+ u8 assert_existptr[0x20];
+
+ u8 assert_callra[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 fw_version[0x20];
+
+ u8 hw_id[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 irisc_index[0x8];
+ u8 synd[0x8];
+ u8 ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+ u8 no_lb[0x1];
+ u8 reserved_0[0x7];
+ u8 port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+};
+
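+/*
+ * Command layouts.  Each firmware command below is described by an
+ * _in_bits layout beginning with opcode/op_mod and an _out_bits
+ * layout beginning with status/syndrome, the common command-mailbox
+ * header.
+ */
+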
+struct mlx5_ifc_teardown_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 profile[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x6];
+ u8 demux_mode[0x2];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
};
struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,10 +2766,710 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_2[0x40];
- struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_query_hca_cap_in_bits {
+struct mlx5_ifc_set_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 admin_state[0x4];
+ u8 state[0x4];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits received_errors;
+
+ struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+ u8 reserved_2[0xa00];
+};
+
+enum {
+ MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x60];
+
+ u8 clear[0x1];
+ u8 reserved_4[0x1f];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_3[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 rx_write_requests[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 rx_read_requests[0x20];
+
+ u8 reserved_3[0x20];
+
+ u8 rx_atomic_requests[0x20];
+
+ u8 reserved_4[0x20];
+
+ u8 rx_dct_connect[0x20];
+
+ u8 reserved_5[0x20];
+
+ u8 out_of_buffer[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 out_of_sequence[0x20];
+
+ u8 reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_3[0x1f];
+
+ u8 reserved_4[0x18];
+ u8 counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+};
+
+enum {
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
+ MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
+};
+
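+/*
+ * op_mod selects which page requirement is queried (boot, init or
+ * regular post-init pages).  The driver treats num_pages as signed:
+ * a negative value means the device wants pages reclaimed.
+ */
+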
+struct mlx5_ifc_query_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 bsf0_klm0_pas_mtt0_1[16][0x8];
+
+ u8 bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 mad_demux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 reserved_2[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_2[0xa0];
+
+ u8 supported_issi_reserved[76][0x8];
+ u8 supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -326,6 +3479,89 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_2[0x40];
};
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 gids_num[0x10];
+ u8 reserved_2[0x10];
+
+ struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -334,16 +3570,3232 @@ struct mlx5_ifc_query_hca_cap_out_bits {
u8 reserved_1[0x40];
- u8 capability_struct[256][0x8];
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_set_hca_cap_out_bits {
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x80];
+
+ u8 reserved_2[0x8];
+ u8 level[0x8];
+ u8 reserved_3[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x1c0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+enum {
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_3[0xa0];
+
+ u8 reserved_4[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_2[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_3[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 cur_flows[0x20];
+
+ u8 sum_flows[0x20];
+
+ u8 cnp_ignored_high[0x20];
+
+ u8 cnp_ignored_low[0x20];
+
+ u8 cnp_handled_high[0x20];
+
+ u8 cnp_handled_low[0x20];
+
+ u8 reserved_2[0x100];
+
+ u8 time_stamp_high[0x20];
+
+ u8 time_stamp_low[0x20];
+
+ u8 accumulators_period[0x20];
+
+ u8 ecn_marked_roce_packets_high[0x20];
+
+ u8 ecn_marked_roce_packets_low[0x20];
+
+ u8 cnps_sent_high[0x20];
+
+ u8 cnps_sent_low[0x20];
+
+ u8 reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 clear[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x20];
+};
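
The counters in query_cong_statistics_out above are split into 32-bit
high/low halves. A minimal sketch of folding one pair back into a u64,
assuming the MLX5_GET accessor from linux/mlx5/device.h and an `out'
buffer already filled by the QUERY_CONG_STATISTICS command (neither is
part of this hunk):

	u64 cnps_sent;

	cnps_sent = ((u64)MLX5_GET(query_cong_statistics_out, out,
				   cnps_sent_high) << 32) |
		    MLX5_GET(query_cong_statistics_out, out,
			     cnps_sent_low);
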
+
+struct mlx5_ifc_query_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 error[0x1];
+ u8 reserved_2[0x4];
+ u8 rdma[0x1];
+ u8 read_write[0x1];
+ u8 req_res[0x1];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x18];
+ u8 admin_state[0x4];
+ u8 reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_bitmask_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x1b];
+ u8 self_lb_en[0x1];
+ u8 reserved_2[0x3];
+ u8 lro[0x1];
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 sq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rqt_bitmask_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x1f];
+ u8 rqn_list[0x1];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rqt_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x1f];
+ u8 lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rmp_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+ u8 reserved_0[0x1c];
+ u8 permanent_address[0x1];
+ u8 addresses_list[0x1];
+ u8 roce_en[0x1];
+ u8 reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+ u8 reserved_3[0x780];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
+ MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+ u8 reserved_3[0x80];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 output_num_entries[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 pas[0][0x40];
+};
+
+enum {
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 input_num_entries[0x20];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_mad_ifc_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 remote_lid[0x10];
+ u8 reserved_2[0x8];
+ u8 port[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 packet_headers_log[128][0x8];
+
+ u8 packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 psvn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_4[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 reserved_2[0x8];
+ u8 psv0_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 psv1_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 psv2_index[0x18];
+
+ u8 reserved_5[0x8];
+ u8 psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_psv[0x4];
+ u8 reserved_2[0x4];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_4[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 reserved_5[0x560];
+
+ u8 klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x8];
+ u8 level[0x8];
+ u8 reserved_6[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 group_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_7[0xa0];
+
+ u8 reserved_8[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_9[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_3[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_4[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srq_number[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dct_number[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 register_data[0][0x20];
+};
+
+enum {
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 register_id[0x10];
+
+ u8 argument[0x20];
+
+ u8 register_data[0][0x20];
+};
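
The access_register_in/out pair above is the generic transport for the
port registers defined next (SLTP, PTYS, PMTU, PAOS, ...). A hedged
sketch of reading the PMTU register through it, assuming
mlx5_core_access_reg() and MLX5_REG_PMTU from linux/mlx5/driver.h plus
the MLX5_ST_SZ_DW/MLX5_SET/MLX5_GET helpers from linux/mlx5/device.h
(the pmtu_reg layout itself appears further down in this file):

	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	u16 max_mtu;
	int err;

	MLX5_SET(pmtu_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMTU, 0, 0);
	if (!err)
		max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
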
+
+struct mlx5_ifc_sltp_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x7];
+ u8 polarity[0x1];
+ u8 ob_tap0[0x8];
+ u8 ob_tap1[0x8];
+ u8 ob_tap2[0x8];
+
+ u8 reserved_4[0xc];
+ u8 ob_preemp_mode[0x4];
+ u8 ob_reg[0x8];
+ u8 ob_bias[0x8];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 time_to_link_up[0x10];
+ u8 reserved_2[0xc];
+ u8 grade_lane_speed[0x4];
+
+ u8 grade_version[0x8];
+ u8 grade[0x18];
+
+ u8 reserved_3[0x4];
+ u8 height_grade_type[0x4];
+ u8 height_grade[0x18];
+
+ u8 height_dz[0x10];
+ u8 height_dv[0x10];
+
+ u8 reserved_4[0x10];
+ u8 height_sigma[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x4];
+ u8 phase_grade_type[0x4];
+ u8 phase_grade[0x18];
+
+ u8 reserved_7[0x8];
+ u8 phase_eo_pos[0x8];
+ u8 reserved_8[0x8];
+ u8 phase_eo_neg[0x8];
+
+ u8 ffe_set_tested[0x10];
+ u8 test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 vl_hw_cap[0x4];
+
+ u8 reserved_3[0x1c];
+ u8 vl_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 proto_mask[0x3];
+
+ u8 reserved_2[0x40];
+
+ u8 eth_proto_capability[0x20];
+
+ u8 ib_link_width_capability[0x10];
+ u8 ib_proto_capability[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 eth_proto_admin[0x20];
+
+ u8 ib_link_width_admin[0x10];
+ u8 ib_proto_admin[0x10];
+
+ u8 reserved_4[0x20];
+
+ u8 eth_proto_oper[0x20];
+
+ u8 ib_link_width_oper[0x10];
+ u8 ib_proto_oper[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 eth_proto_lp_advertise[0x20];
+
+ u8 reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+ u8 reserved_0[0x20];
+
+ u8 algorithm_options[0x10];
+ u8 reserved_1[0x4];
+ u8 repetitions_mode[0x4];
+ u8 num_of_repetitions[0x8];
+
+ u8 grade_version[0x8];
+ u8 height_grade_type[0x4];
+ u8 phase_grade_type[0x4];
+ u8 height_grade_weight[0x8];
+ u8 phase_grade_weight[0x8];
+
+ u8 gisim_measure_bits[0x10];
+ u8 adaptive_tap_measure_bits[0x10];
+
+ u8 ber_bath_high_error_threshold[0x10];
+ u8 ber_bath_mid_error_threshold[0x10];
+
+ u8 ber_bath_low_error_threshold[0x10];
+ u8 one_ratio_high_threshold[0x10];
+
+ u8 one_ratio_high_mid_threshold[0x10];
+ u8 one_ratio_low_mid_threshold[0x10];
+
+ u8 one_ratio_low_threshold[0x10];
+ u8 ndeo_error_threshold[0x10];
+
+ u8 mixer_offset_step_size[0x10];
+ u8 reserved_2[0x8];
+ u8 mix90_phase_for_voltage_bath[0x8];
+
+ u8 mixer_offset_start[0x10];
+ u8 mixer_offset_end[0x10];
+
+ u8 reserved_3[0x15];
+ u8 ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 sub_port[0x8];
+ u8 reserved_0[0x8];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x5];
+ u8 prio[0x3];
+ u8 reserved_2[0x6];
+ u8 mode[0x2];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x10];
+ u8 min_threshold[0x10];
+
+ u8 reserved_5[0x10];
+ u8 max_threshold[0x10];
+
+ u8 reserved_6[0x10];
+ u8 mark_probability_denominator[0x10];
+
+ u8 reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x1c];
+ u8 wrps_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 wrps_status[0x4];
+
+ u8 reserved_5[0x8];
+ u8 up_threshold[0x8];
+ u8 reserved_6[0x8];
+ u8 down_threshold[0x8];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x1c];
+ u8 srps_admin[0x4];
+
+ u8 reserved_9[0x1c];
+ u8 srps_status[0x4];
+
+ u8 reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x8];
+ u8 lb_cap[0x8];
+ u8 reserved_3[0x8];
+ u8 lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_3[0x8];
+
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
+
+ u8 reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x8];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_1[0x1c];
+ u8 prio_tc[0x3];
+
+ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+ u8 reserved_0[0x3];
+ u8 single_mac[0x1];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 mac_47_32[0x10];
+
+ u8 mac_31_0[0x20];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 max_mtu[0x10];
+ u8 reserved_2[0x10];
+
+ u8 admin_mtu[0x10];
+ u8 reserved_3[0x10];
+
+ u8 oper_mtu[0x10];
+ u8 reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x18];
+ u8 attenuation_5g[0x8];
+
+ u8 reserved_3[0x18];
+ u8 attenuation_7g[0x8];
+
+ u8 reserved_4[0x18];
+ u8 attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0xc];
+ u8 module_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+ u8 module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+ u8 reserved_0[0x4];
+ u8 mlpn_status[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 e[0x1];
+ u8 reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+ u8 rxtx[0x1];
+ u8 reserved_0[0x7];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 width[0x8];
+
+ u8 lane0_module_mapping[0x20];
+
+ u8 lane1_module_mapping[0x20];
+
+ u8 lane2_module_mapping[0x20];
+
+ u8 lane3_module_mapping[0x20];
+
+ u8 reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_2[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_3[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+ u8 reserved_0[0x4];
+ u8 profile_id[0xc];
+ u8 reserved_1[0x4];
+ u8 proto_mask[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x10];
+ u8 lane_speed[0x10];
+
+ u8 reserved_4[0x17];
+ u8 lpbf[0x1];
+ u8 fec_mode_policy[0x8];
+
+ u8 retransmission_capability[0x8];
+ u8 fec_mode_capability[0x18];
+
+ u8 retransmission_support_admin[0x8];
+ u8 fec_mode_support_admin[0x18];
+
+ u8 retransmission_request_admin[0x8];
+ u8 fec_mode_request_admin[0x18];
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 ib_port[0x8];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 lbf_mode[0x3];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 dic[0x1];
+ u8 reserved_2[0x19];
+ u8 ipg[0x4];
+ u8 reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xe0];
+
+ u8 port_filter[8][0x20];
+
+ u8 port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 ppan[0x4];
+ u8 reserved_2[0x4];
+ u8 prio_mask_tx[0x8];
+ u8 reserved_3[0x8];
+ u8 prio_mask_rx[0x8];
+
+ u8 pptx[0x1];
+ u8 aptx[0x1];
+ u8 reserved_4[0x6];
+ u8 pfctx[0x8];
+ u8 reserved_5[0x10];
+
+ u8 pprx[0x1];
+ u8 aprx[0x1];
+ u8 reserved_6[0x6];
+ u8 pfcrx[0x8];
+ u8 reserved_7[0x10];
+
+ u8 reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+ u8 op[0x4];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 op_admin[0x8];
+ u8 op_capability[0x8];
+ u8 op_request[0x8];
+ u8 op_active[0x8];
+
+ u8 admin[0x40];
+
+ u8 capability[0x40];
+
+ u8 request[0x40];
+
+ u8 active[0x40];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xc];
+ u8 error_count[0x4];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0xc];
+ u8 lane[0x4];
+ u8 reserved_5[0x8];
+ u8 error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_2[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+ u8 reserved_0[0x8];
+ u8 opamp_group[0x8];
+ u8 reserved_1[0xc];
+ u8 opamp_group_type[0x4];
+
+ u8 start_index[0x10];
+ u8 reserved_2[0x4];
+ u8 num_of_indices[0xc];
+
+ u8 index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+ u8 reserved_0[0x6];
+ u8 rx_lane[0x2];
+ u8 reserved_1[0x6];
+ u8 tx_lane[0x2];
+ u8 reserved_2[0x8];
+ u8 module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+ u8 reserved_0[0x6];
+ u8 lossy[0x1];
+ u8 epsb[0x1];
+ u8 reserved_1[0xc];
+ u8 size[0xc];
+
+ u8 xoff_threshold[0x10];
+ u8 xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+ u8 node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+ u8 reserved_0[0x18];
+ u8 power_settings_level[0x8];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+ u8 he[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+ u8 reserved_0[0x20];
+
+ u8 mkey[0x20];
+
+ u8 addressh_63_32[0x20];
+
+ u8 addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+ u8 dc_key[0x40];
+
+ u8 ext[0x1];
+ u8 reserved_0[0x7];
+ u8 destination_qp_dct[0x18];
+
+ u8 static_rate[0x4];
+ u8 sl_eth_prio[0x4];
+ u8 fl[0x1];
+ u8 mlid[0x7];
+ u8 rlid_udp_sport[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 rmac_47_16[0x20];
+
+ u8 rmac_15_0[0x10];
+ u8 tclass[0x8];
+ u8 hop_limit[0x8];
+
+ u8 reserved_2[0x1];
+ u8 grh[0x1];
+ u8 reserved_3[0x2];
+ u8 src_addr_index[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+ u8 reserved_0[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+
+ u8 reserved_1[0xa0];
+};
+
+struct mlx5_ifc_eqe_bits {
+ u8 reserved_0[0x8];
+ u8 event_type[0x8];
+ u8 reserved_1[0x8];
+ u8 event_sub_type[0x8];
+
+ u8 reserved_2[0xe0];
+
+ union mlx5_ifc_event_auto_bits event_data;
+
+ u8 reserved_3[0x10];
+ u8 signature[0x8];
+ u8 reserved_4[0x7];
+ u8 owner[0x1];
+};
+
+enum {
+ MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+ u8 type[0x8];
+ u8 reserved_0[0x18];
+
+ u8 input_length[0x20];
+
+ u8 input_mailbox_pointer_63_32[0x20];
+
+ u8 input_mailbox_pointer_31_9[0x17];
+ u8 reserved_1[0x9];
+
+ u8 command_input_inline_data[16][0x8];
+
+ u8 command_output_inline_data[16][0x8];
+
+ u8 output_mailbox_pointer_63_32[0x20];
+
+ u8 output_mailbox_pointer_31_9[0x17];
+ u8 reserved_2[0x9];
+
+ u8 output_length[0x20];
+
+ u8 token[0x8];
+ u8 signature[0x8];
+ u8 reserved_3[0x8];
+ u8 status[0x7];
+ u8 ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+ u8 mailbox_data[512][0x8];
+
+ u8 reserved_0[0x180];
+
+ u8 next_pointer_63_32[0x20];
+
+ u8 next_pointer_31_10[0x16];
+ u8 reserved_1[0xa];
+
+ u8 block_number[0x20];
+
+ u8 reserved_2[0x8];
+ u8 token[0x8];
+ u8 ctrl_signature[0x8];
+ u8 signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+ u8 ptag_63_32[0x20];
+
+ u8 ptag_31_8[0x18];
+ u8 reserved_0[0x6];
+ u8 wr_en[0x1];
+ u8 rd_en[0x1];
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+ u8 fw_rev_minor[0x10];
+ u8 fw_rev_major[0x10];
+
+ u8 cmd_interface_rev[0x10];
+ u8 fw_rev_subminor[0x10];
+
+ u8 reserved_0[0x40];
+
+ u8 cmdq_phy_addr_63_32[0x20];
+
+ u8 cmdq_phy_addr_31_12[0x14];
+ u8 reserved_1[0x2];
+ u8 nic_interface[0x2];
+ u8 log_cmdq_size[0x4];
+ u8 log_cmdq_stride[0x4];
+
+ u8 command_doorbell_vector[0x20];
+
+ u8 reserved_2[0xf00];
+
+ u8 initializing[0x1];
+ u8 reserved_3[0x4];
+ u8 nic_interface_supported[0x3];
+ u8 reserved_4[0x18];
+
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+
+ u8 no_dram_nic_offset[0x20];
+
+ u8 reserved_5[0x6e40];
+
+ u8 reserved_6[0x1f];
+ u8 clear_int[0x1];
+
+ u8 health_syndrome[0x8];
+ u8 health_counter[0x18];
+
+ u8 reserved_7[0x17fc0];
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+ struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+ struct mlx5_ifc_pamp_reg_bits pamp_reg;
+ struct mlx5_ifc_paos_reg_bits paos_reg;
+ struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_peir_reg_bits peir_reg;
+ struct mlx5_ifc_pelc_reg_bits pelc_reg;
+ struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_pifr_reg_bits pifr_reg;
+ struct mlx5_ifc_pipg_reg_bits pipg_reg;
+ struct mlx5_ifc_plbf_reg_bits plbf_reg;
+ struct mlx5_ifc_plib_reg_bits plib_reg;
+ struct mlx5_ifc_plpc_reg_bits plpc_reg;
+ struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+ struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+ struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+ struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+ struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+ struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+ struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+ struct mlx5_ifc_ppad_reg_bits ppad_reg;
+ struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_pplm_reg_bits pplm_reg;
+ struct mlx5_ifc_pplr_reg_bits pplr_reg;
+ struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+ struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+ struct mlx5_ifc_pspa_reg_bits pspa_reg;
+ struct mlx5_ifc_ptas_reg_bits ptas_reg;
+ struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_pude_reg_bits pude_reg;
+ struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+ struct mlx5_ifc_slrg_reg_bits slrg_reg;
+ struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ u8 reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+ u8 reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+ struct mlx5_ifc_initial_seg_bits initial_seg;
+ u8 reserved_0[0x20060];
};
#endif /* MLX5_IFC_H */
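
All of the *_bits layouts added above describe firmware command
mailboxes bit-by-bit; drivers build commands with the
MLX5_SET/MLX5_GET/MLX5_ST_SZ_DW accessors from linux/mlx5/device.h
rather than by casting structs onto the buffer. A minimal sketch for
ALLOC_PD, assuming mlx5_cmd_exec() from linux/mlx5/driver.h and a
caller-supplied `dev' and `pdn' (not defined in this patch):

	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);
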
diff --git a/kernel/include/linux/mlx5/qp.h b/kernel/include/linux/mlx5/qp.h
index 310b5f7fd..f079fb1a3 100644
--- a/kernel/include/linux/mlx5/qp.h
+++ b/kernel/include/linux/mlx5/qp.h
@@ -134,13 +134,21 @@ enum {
enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+ MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};
enum {
+ MLX5_SEND_WQE_DS = 16,
MLX5_SEND_WQE_BB = 64,
};
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+ MLX5_SEND_WQE_MAX_WQEBBS = 16,
+};
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+enum {
+ MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
+ MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
+ MLX5_ETH_WQE_L3_CSUM = 1 << 6,
+ MLX5_ETH_WQE_L4_CSUM = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+ u8 rsvd0[4];
+ u8 cs_flags;
+ u8 rsvd1;
+ __be16 mss;
+ __be32 rsvd2;
+ __be16 inline_hdr_sz;
+ u8 inline_hdr_start[2];
+};
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
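
The new mlx5_wqe_eth_seg and MLX5_ETH_WQE_* flags above let an Ethernet
send WQE request checksum offload per packet. A hedged sketch of a TX
path filling the segment, where `eseg' points just past the control
segment of the WQE and `skb' is the packet being posted (both assumed
from the caller, not defined in this patch):

	memset(eseg, 0, sizeof(*eseg));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;
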
diff --git a/kernel/include/linux/mlx5/vport.h b/kernel/include/linux/mlx5/vport.h
new file mode 100644
index 000000000..967e0fd06
--- /dev/null
+++ b/kernel/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid);
+
+#endif /* __MLX5_VPORT_H__ */
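
A minimal usage sketch for the new vport query API, assuming an already-probed mlx5_core_dev; the opmod value 0 is an assumption standing in for the NIC-vport operation modifier defined by the hardware spec:

#include <linux/etherdevice.h>
#include <linux/mlx5/vport.h>

static void report_vport(struct mlx5_core_dev *mdev)
{
	u8 mac[ETH_ALEN];
	u8 state = mlx5_query_vport_state(mdev, 0); /* 0: NIC vport (assumed) */

	mlx5_query_nic_vport_mac_address(mdev, mac);
	pr_info("vport state %u, mac %pM\n", state, mac);
}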
diff --git a/kernel/include/linux/mm-arch-hooks.h b/kernel/include/linux/mm-arch-hooks.h
new file mode 100644
index 000000000..4efc3f56e
--- /dev/null
+++ b/kernel/include/linux/mm-arch-hooks.h
@@ -0,0 +1,25 @@
+/*
+ * Generic mm no-op hooks.
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_MM_ARCH_HOOKS_H
+#define _LINUX_MM_ARCH_HOOKS_H
+
+#include <asm/mm-arch-hooks.h>
+
+#ifndef arch_remap
+static inline void arch_remap(struct mm_struct *mm,
+ unsigned long old_start, unsigned long old_end,
+ unsigned long new_start, unsigned long new_end)
+{
+}
+#define arch_remap arch_remap
+#endif
+
+#endif /* _LINUX_MM_ARCH_HOOKS_H */
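
The #ifndef/#define pattern above lets an architecture replace the generic no-op from its own asm/mm-arch-hooks.h. A sketch of what such an override would look like; the body is hypothetical:

/* In <asm/mm-arch-hooks.h> on an overriding architecture: */
static inline void arch_remap(struct mm_struct *mm,
			      unsigned long old_start, unsigned long old_end,
			      unsigned long new_start, unsigned long new_end)
{
	/* e.g. fix up an arch-private mapping that mremap() moved */
}
#define arch_remap arch_remap	/* suppress the generic stub */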
diff --git a/kernel/include/linux/mm.h b/kernel/include/linux/mm.h
index b2085582d..00bad7793 100644
--- a/kernel/include/linux/mm.h
+++ b/kernel/include/linux/mm.h
@@ -20,6 +20,7 @@
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
+#include <linux/err.h>
struct mempolicy;
struct anon_vma;
@@ -27,6 +28,7 @@ struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
+struct bdi_writeback;
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -123,8 +125,10 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
@@ -135,6 +139,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
@@ -198,6 +203,9 @@ extern unsigned int kobjsize(const void *objp);
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
+/* This mask is used to clear all the VMA flags used by mlock */
+#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
+
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
@@ -244,7 +252,10 @@ struct vm_fault {
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
+ int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+ pmd_t *, unsigned int flags);
void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
/* notification that a previously read-only page is about to become
@@ -303,18 +314,6 @@ struct inode;
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
-/* It's valid only if the page is free path or free_list */
-static inline void set_freepage_migratetype(struct page *page, int migratetype)
-{
- page->index = migratetype;
-}
-
-/* It's valid only if the page is free path or free_list */
-static inline int get_freepage_migratetype(struct page *page)
-{
- return page->index;
-}
-
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)
@@ -355,20 +354,15 @@ static inline int get_page_unless_zero(struct page *page)
return atomic_inc_not_zero(&page->_count);
}
-/*
- * Try to drop a ref unless the page has a refcount of one, return false if
- * that is the case.
- * This is to make sure that the refcount won't become zero after this drop.
- * This can be called when MMU is off so it must not access
- * any of the virtual mappings.
- */
-static inline int put_page_unless_one(struct page *page)
-{
- return atomic_add_unless(&page->_count, -1, 1);
-}
-
extern int page_is_ram(unsigned long pfn);
-extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
+
+enum {
+ REGION_INTERSECTS,
+ REGION_DISJOINT,
+ REGION_MIXED,
+};
+
+int region_intersects(resource_size_t offset, size_t size, const char *type);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
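
region_intersects() replaces the boolean region_is_ram() with a tristate answer, so callers can distinguish a range that is entirely RAM from one that merely touches it. A minimal sketch, assuming the conventional "System RAM" resource name:

/* Treat the range as RAM only when it lies wholly inside "System RAM";
 * REGION_MIXED would mean it straddles a RAM/non-RAM boundary. */
static bool range_is_system_ram(resource_size_t offset, size_t size)
{
	return region_intersects(offset, size, "System RAM")
			== REGION_INTERSECTS;
}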
@@ -436,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page,
#endif
}
-static inline struct page *compound_head_by_tail(struct page *tail)
-{
- struct page *head = tail->first_page;
-
- /*
- * page->first_page may be a dangling pointer to an old
- * compound page, so recheck that it is still a tail
- * page before returning.
- */
- smp_rmb();
- if (likely(PageTail(tail)))
- return head;
- return tail;
-}
-
-/*
- * Since either compound page could be dismantled asynchronously in THP
- * or we access asynchronously arbitrary positioned struct page, there
- * would be tail flag race. To handle this race, we should call
- * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
- */
-static inline struct page *compound_head(struct page *page)
-{
- if (unlikely(PageTail(page)))
- return compound_head_by_tail(page);
- return page;
-}
-
-/*
- * If we access compound page synchronously such as access to
- * allocated page, there is no need to handle tail flag race, so we can
- * check tail flag directly without any synchronization primitive.
- */
-static inline struct page *compound_head_fast(struct page *page)
-{
- if (unlikely(PageTail(page)))
- return page->first_page;
- return page;
-}
-
/*
* The atomic page->_mapcount, starts from -1: so that transitions
* both from it and to it can be tracked, using atomic_inc_and_test
@@ -499,7 +453,7 @@ static inline int page_count(struct page *page)
static inline bool __compound_tail_refcounted(struct page *page)
{
- return !PageSlab(page) && !PageHeadHuge(page);
+ return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page);
}
/*
@@ -524,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page)
VM_BUG_ON_PAGE(!PageTail(page), page);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
- if (compound_tail_refcounted(page->first_page))
+ if (compound_tail_refcounted(compound_head(page)))
atomic_inc(&page->_mapcount);
}
@@ -547,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
- /*
- * We don't need to worry about synchronization of tail flag
- * when we call virt_to_head_page() since it is only called for
- * already allocated page and this page won't be freed until
- * this virt_to_head_page() is finished. So use _fast variant.
- */
- return compound_head_fast(page);
+ return compound_head(page);
}
/*
@@ -574,28 +522,42 @@ int split_free_page(struct page *page);
/*
* Compound pages have a destructor function. Provide a
* prototype for that function and accessor functions.
- * These are _only_ valid on the head of a PG_compound page.
+ * These are _only_ valid on the head of a compound page.
*/
+typedef void compound_page_dtor(struct page *);
+
+/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
+enum compound_dtor_id {
+ NULL_COMPOUND_DTOR,
+ COMPOUND_PAGE_DTOR,
+#ifdef CONFIG_HUGETLB_PAGE
+ HUGETLB_PAGE_DTOR,
+#endif
+ NR_COMPOUND_DTORS,
+};
+extern compound_page_dtor * const compound_page_dtors[];
static inline void set_compound_page_dtor(struct page *page,
- compound_page_dtor *dtor)
+ enum compound_dtor_id compound_dtor)
{
- page[1].compound_dtor = dtor;
+ VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
+ page[1].compound_dtor = compound_dtor;
}
static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
- return page[1].compound_dtor;
+ VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
+ return compound_page_dtors[page[1].compound_dtor];
}
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
-static inline void set_compound_order(struct page *page, unsigned long order)
+static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
}
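
Storing an enum index instead of a function pointer shrinks the state kept in page[1] and lets both accessors sanity-check the value. A sketch of the table in mm/page_alloc.c that the enum must stay in sync with; the entries shown are plausible, not quoted from this patch:

/* mm/page_alloc.c -- order must match enum compound_dtor_id */
compound_page_dtor * const compound_page_dtors[] = {
	NULL,			/* NULL_COMPOUND_DTOR */
	free_compound_page,	/* COMPOUND_PAGE_DTOR */
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,		/* HUGETLB_PAGE_DTOR */
#endif
};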
@@ -915,6 +877,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+ page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
@@ -1225,6 +1208,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+ unsigned int nr_allocated; /* Number of frames we have space for */
+ unsigned int nr_frames; /* Number of frames stored in ptrs array */
+ bool got_ref; /* Did we pin pages by getting page ref? */
+ bool is_pfns; /* Does array contain pages or pfns? */
+ void *ptrs[0]; /* Array of pinned pfns / pages. Use
+ * frame_vector_pages() or frame_vector_pfns()
+ * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+ bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+ return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+ if (vec->is_pfns) {
+ int err = frame_vector_to_pages(vec);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+ return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+ if (!vec->is_pfns)
+ frame_vector_to_pfns(vec);
+ return (unsigned long *)(vec->ptrs);
+}
+
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
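
The frame_vector API bundles the create/pin/convert/release cycle that drivers previously open-coded around get_user_pages(). A minimal sketch of that cycle; error handling is abbreviated, and frame_vector_pages() can itself return an ERR_PTR():

static int pin_user_range(unsigned long start, unsigned int nr)
{
	struct frame_vector *vec = frame_vector_create(nr);
	struct page **pages;
	int ret;

	if (!vec)
		return -ENOMEM;
	ret = get_vaddr_frames(start, nr, true, true, vec);
	if (ret < 0)
		goto out;
	pages = frame_vector_pages(vec);	/* converts pfns if needed */
	if (!IS_ERR(pages)) {
		/* use pages[0 .. frame_vector_count(vec) - 1] */
	}
	put_vaddr_frames(vec);
out:
	frame_vector_destroy(vec);
	return ret < 0 ? ret : 0;
}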
@@ -1239,10 +1265,13 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_cleaned(struct page *page, struct address_space *mapping);
+void account_page_dirtied(struct page *page, struct address_space *mapping,
+ struct mem_cgroup *memcg);
+void account_page_cleaned(struct page *page, struct address_space *mapping,
+ struct mem_cgroup *memcg, struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
+void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -1253,6 +1282,11 @@ static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops;
+}
+
static inline int stack_guard_page_start(struct vm_area_struct *vma,
unsigned long addr)
{
@@ -1506,8 +1540,7 @@ static inline bool ptlock_init(struct page *page)
* with 0. Make sure nobody took it in use in between.
*
 * It can happen if an arch tries to use slab for page table allocation:
- * slab code uses page->slab_cache and page->first_page (for tail
- * pages), which share storage with page->ptl.
+ * slab code uses page->slab_cache, which shares storage with page->ptl.
*/
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
if (!ptlock_alloc(page))
@@ -1544,8 +1577,10 @@ static inline void pgtable_init(void)
static inline bool pgtable_page_ctor(struct page *page)
{
+ if (!ptlock_init(page))
+ return false;
inc_zone_page_state(page, NR_PAGETABLE);
- return ptlock_init(page);
+ return true;
}
static inline void pgtable_page_dtor(struct page *page)
@@ -1659,6 +1694,8 @@ extern void free_highmem_page(struct page *page);
extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);
+extern void reserve_bootmem_region(unsigned long start, unsigned long end);
+
/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
@@ -1748,7 +1785,8 @@ extern void sparse_memory_present_with_active_regions(int nid);
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn)
+static inline int __early_pfn_to_nid(unsigned long pfn,
+ struct mminit_pfnnid_cache *state)
{
return 0;
}
@@ -1756,7 +1794,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+extern int __meminit __early_pfn_to_nid(unsigned long pfn,
+ struct mminit_pfnnid_cache *state);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -1771,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+ const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
@@ -1825,7 +1865,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *);
+ struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1872,11 +1912,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+static inline unsigned long
+do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot, unsigned long flags,
+ unsigned long pgoff, unsigned long *populate)
+{
+ return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
+}
+
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
@@ -1962,8 +2010,6 @@ void page_cache_async_readahead(struct address_space *mapping,
pgoff_t offset,
unsigned long size);
-unsigned long max_sane_readahead(unsigned long nr);
-
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -2063,6 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
+#define FOLL_MLOCK 0x1000 /* lock present pages */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
@@ -2174,12 +2221,48 @@ enum mf_flags {
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
+extern int get_hwpoison_page(struct page *page);
+extern void put_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);
+
+/*
+ * Error handlers for various types of pages.
+ */
+enum mf_result {
+ MF_IGNORED, /* Error: cannot be handled */
+ MF_FAILED, /* Error: handling failed */
+ MF_DELAYED, /* Will be handled later */
+ MF_RECOVERED, /* Successfully recovered */
+};
+
+enum mf_action_page_type {
+ MF_MSG_KERNEL,
+ MF_MSG_KERNEL_HIGH_ORDER,
+ MF_MSG_SLAB,
+ MF_MSG_DIFFERENT_COMPOUND,
+ MF_MSG_POISONED_HUGE,
+ MF_MSG_HUGE,
+ MF_MSG_FREE_HUGE,
+ MF_MSG_UNMAP_FAILED,
+ MF_MSG_DIRTY_SWAPCACHE,
+ MF_MSG_CLEAN_SWAPCACHE,
+ MF_MSG_DIRTY_MLOCKED_LRU,
+ MF_MSG_CLEAN_MLOCKED_LRU,
+ MF_MSG_DIRTY_UNEVICTABLE_LRU,
+ MF_MSG_CLEAN_UNEVICTABLE_LRU,
+ MF_MSG_DIRTY_LRU,
+ MF_MSG_CLEAN_LRU,
+ MF_MSG_TRUNCATED_LRU,
+ MF_MSG_BUDDY,
+ MF_MSG_BUDDY_2ND,
+ MF_MSG_UNKNOWN,
+};
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
unsigned long addr,
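
The mf_result codes give the per-page-state handlers in mm/memory-failure.c a uniform way to report their outcome. A hypothetical handler shape; the name and logic are illustrative only:

static int me_example_clean_lru(struct page *p, unsigned long pfn)
{
	if (!isolate_lru_page(p))
		return MF_RECOVERED;	/* page dropped cleanly */
	return MF_FAILED;		/* could not isolate the page */
}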
diff --git a/kernel/include/linux/mm_types.h b/kernel/include/linux/mm_types.h
index 89c047144..b238ebfbb 100644
--- a/kernel/include/linux/mm_types.h
+++ b/kernel/include/linux/mm_types.h
@@ -29,8 +29,6 @@ struct mem_cgroup;
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
-typedef void compound_page_dtor(struct page *);
-
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -114,7 +112,13 @@ struct page {
};
};
- /* Third double word block */
+ /*
+ * Third double word block
+ *
+ * WARNING: bit 0 of the first word encodes PageTail(). That means
+ * the other users of this storage space MUST NOT set the bit,
+ * to avoid collisions and false-positive PageTail() results.
+ */
union {
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
@@ -132,18 +136,37 @@ struct page {
#endif
};
- struct slab *slab_page; /* slab fields */
struct rcu_head rcu_head; /* Used by SLAB
* when destroying via RCU
*/
- /* First tail page of compound page */
+ /* Tail pages of compound page */
struct {
- compound_page_dtor *compound_dtor;
- unsigned long compound_order;
+ unsigned long compound_head; /* If bit zero is set */
+
+ /* First tail page only */
+#ifdef CONFIG_64BIT
+ /*
+ * On 64-bit systems we have enough space in struct page
+ * to encode compound_dtor and compound_order as
+ * unsigned int, which can help the compiler generate
+ * better or smaller code on some architectures.
+ */
+ unsigned int compound_dtor;
+ unsigned int compound_order;
+#else
+ unsigned short int compound_dtor;
+ unsigned short int compound_order;
+#endif
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ struct {
+ unsigned long __pad; /* do not overlay pmd_huge_pte
+ * with compound_head to avoid
+ * possible bit 0 collision.
+ */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ };
#endif
};
@@ -164,7 +187,6 @@ struct page {
#endif
#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
- struct page *first_page; /* Compound tail pages */
};
#ifdef CONFIG_MEMCG
@@ -218,7 +240,25 @@ struct page_frag {
#endif
};
-typedef unsigned long __nocast vm_flags_t;
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+ void * va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ __u16 offset;
+ __u16 size;
+#else
+ __u32 offset;
+#endif
+ /* we maintain a pagecount bias, so that we don't dirty the cache line
+ * containing page->_count every time we allocate a fragment.
+ */
+ unsigned int pagecnt_bias;
+ bool pfmemalloc;
+};
+
+typedef unsigned long vm_flags_t;
/*
* A region containing a mapping of a non-memory backed file under NOMMU
@@ -239,6 +279,16 @@ struct vm_region {
* this region */
};
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+ struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
+
/*
* This struct defines a memory VMM memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
@@ -305,6 +355,7 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
struct core_thread {
@@ -461,6 +512,9 @@ struct mm_struct {
/* address of the bounds directory */
void __user *bd_addr;
#endif
+#ifdef CONFIG_HUGETLB_PAGE
+ atomic_long_t hugetlb_usage;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
@@ -529,6 +583,7 @@ enum tlb_flush_reason {
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
+ TLB_REMOTE_SEND_IPI,
NR_TLB_FLUSH_REASONS,
};
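
The bit-0 warning above exists because tail pages now carry their head pointer in compound_head with bit 0 set, and every other field sharing that union must keep the bit clear. A sketch of the encoding; the accessor names are illustrative, the real helpers live elsewhere:

static inline void example_set_compound_head(struct page *tail,
					     struct page *head)
{
	tail->compound_head = (unsigned long)head | 1;
}

static inline struct page *example_compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	return (head & 1) ? (struct page *)(head - 1) : page;
}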
diff --git a/kernel/include/linux/mmc/card.h b/kernel/include/linux/mmc/card.h
index 19f0175c0..eb0151bac 100644
--- a/kernel/include/linux/mmc/card.h
+++ b/kernel/include/linux/mmc/card.h
@@ -97,6 +97,7 @@ struct mmc_ext_csd {
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
+ u8 raw_driver_strength; /* 197 */
u8 out_of_int_time; /* 198 */
u8 raw_pwr_cl_52_195; /* 200 */
u8 raw_pwr_cl_26_195; /* 201 */
@@ -268,7 +269,6 @@ struct mmc_card {
/* for byte mode */
#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
/* (missing CIA registers) */
-#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
@@ -278,10 +278,13 @@ struct mmc_card {
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
+#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
+ unsigned int eg_boundary; /* don't cross erase-group boundaries */
u8 erased_byte; /* value of erased bytes */
u32 raw_cid[4]; /* raw card CID */
@@ -305,6 +308,7 @@ struct mmc_card {
unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
unsigned int mmc_avail_type; /* supported device type by both host and card */
+ unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
diff --git a/kernel/include/linux/mmc/core.h b/kernel/include/linux/mmc/core.h
index de722d4e9..37967b6da 100644
--- a/kernel/include/linux/mmc/core.h
+++ b/kernel/include/linux/mmc/core.h
@@ -121,6 +121,7 @@ struct mmc_data {
struct mmc_request *mrq; /* associated request */
unsigned int sg_len; /* size of scatter list */
+ int sg_count; /* mapped sg entries */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
};
@@ -151,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
- bool, bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
-extern int mmc_send_tuning(struct mmc_host *host);
+extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#define MMC_ERASE_ARG 0x00000000
diff --git a/kernel/include/linux/mmc/dw_mmc.h b/kernel/include/linux/mmc/dw_mmc.h
index 12111993a..f67b2ec18 100644
--- a/kernel/include/linux/mmc/dw_mmc.h
+++ b/kernel/include/linux/mmc/dw_mmc.h
@@ -16,6 +16,7 @@
#include <linux/scatterlist.h>
#include <linux/mmc/core.h>
+#include <linux/dmaengine.h>
#define MAX_MCI_SLOTS 2
@@ -40,6 +41,17 @@ enum {
struct mmc_data;
+enum {
+ TRANS_MODE_PIO = 0,
+ TRANS_MODE_IDMAC,
+ TRANS_MODE_EDMAC
+};
+
+struct dw_mci_dma_slave {
+ struct dma_chan *ch;
+ enum dma_transfer_direction direction;
+};
+
/**
* struct dw_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
@@ -98,6 +110,7 @@ struct mmc_data;
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
* @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @dto_timer: Timer for hosts with a broken data-transfer-over (DTO) interrupt.
*
* Locking
* =======
@@ -153,11 +166,14 @@ struct dw_mci {
dma_addr_t sg_dma;
void *sg_cpu;
const struct dw_mci_dma_ops *dma_ops;
-#ifdef CONFIG_MMC_DW_IDMAC
+ /* For idmac */
unsigned int ring_size;
-#else
- struct dw_mci_dma_data *dma_data;
-#endif
+
+ /* For edmac */
+ struct dw_mci_dma_slave *dms;
+ /* Registers' physical base address */
+ void *phy_regs;
+
u32 cmd_status;
u32 data_status;
u32 stop_cmdr;
@@ -204,14 +220,15 @@ struct dw_mci {
int sdio_id0;
struct timer_list cmd11_timer;
+ struct timer_list dto_timer;
};
/* DMA ops for Internal/External DMAC interface */
struct dw_mci_dma_ops {
/* DMA Ops */
int (*init)(struct dw_mci *host);
- void (*start)(struct dw_mci *host, unsigned int sg_len);
- void (*complete)(struct dw_mci *host);
+ int (*start)(struct dw_mci *host, unsigned int sg_len);
+ void (*complete)(void *host);
void (*stop)(struct dw_mci *host);
void (*cleanup)(struct dw_mci *host);
void (*exit)(struct dw_mci *host);
@@ -226,12 +243,8 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
-/* No write protect */
-#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
-
-/* Slot level quirks */
-/* This slot has no write protect */
-#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
+/* Timer for hosts with a broken data-transfer-over (DTO) interrupt */
+#define DW_MCI_QUIRK_BROKEN_DTO BIT(4)
struct dma_pdata;
@@ -265,7 +278,6 @@ struct dw_mci_board {
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
- struct block_settings *blk_settings;
};
#endif /* LINUX_MMC_DW_MMC_H */
diff --git a/kernel/include/linux/mmc/host.h b/kernel/include/linux/mmc/host.h
index b5bedaec6..8673ffe3d 100644
--- a/kernel/include/linux/mmc/host.h
+++ b/kernel/include/linux/mmc/host.h
@@ -12,6 +12,7 @@
#include <linux/leds.h>
#include <linux/mutex.h>
+#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
@@ -131,7 +132,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
- int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
+ int (*select_drive_strength)(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
@@ -285,21 +288,10 @@ struct mmc_host {
MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
+#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
mmc_pm_flag_t pm_caps; /* supported pm features */
-#ifdef CONFIG_MMC_CLKGATE
- int clk_requests; /* internal reference counter */
- unsigned int clk_delay; /* number of MCI clk hold cycles */
- bool clk_gated; /* clock gated */
- struct delayed_work clk_gate_work; /* delayed clock gate */
- unsigned int clk_old; /* old clock value cache */
- spinlock_t clk_lock; /* lock for clk fields */
- struct mutex clk_gate_mutex; /* mutex for clock gating */
- struct device_attribute clkgate_delay_attr;
- unsigned long clkgate_delay;
-#endif
-
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_segs; /* see blk_queue_max_segments */
@@ -321,10 +313,18 @@ struct mmc_host {
#ifdef CONFIG_MMC_DEBUG
unsigned int removed:1; /* host is being removed */
#endif
+ unsigned int can_retune:1; /* re-tuning can be used */
+ unsigned int doing_retune:1; /* re-tuning in progress */
+ unsigned int retune_now:1; /* do re-tuning at next req */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
+ int need_retune; /* re-tuning is needed */
+ int hold_retune; /* hold off re-tuning */
+ unsigned int retune_period; /* re-tuning period in secs */
+ struct timer_list retune_timer; /* for periodic re-tuning */
+
bool trigger_card_event; /* card_event necessary */
struct mmc_card *card; /* device attached to this host */
@@ -400,7 +400,8 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
host->sdio_irq_pending = true;
- wake_up_process(host->sdio_irq_thread);
+ if (host->sdio_irq_thread)
+ wake_up_process(host->sdio_irq_thread);
}
void sdio_run_irqs(struct mmc_host *host);
@@ -410,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply);
int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
#else
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
{
@@ -422,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
{
return 0;
}
+
+static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ return -EINVAL;
+}
#endif
int mmc_regulator_get_supply(struct mmc_host *mmc);
@@ -466,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host)
return host->caps2 & MMC_CAP2_PACKED_WR;
}
-#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_hold(struct mmc_host *host);
-void mmc_host_clk_release(struct mmc_host *host);
-unsigned int mmc_host_clk_rate(struct mmc_host *host);
-
-#else
-static inline void mmc_host_clk_hold(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_release(struct mmc_host *host)
-{
-}
-
-static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
- return host->ios.clock;
-}
-#endif
-
static inline int mmc_card_hs(struct mmc_card *card)
{
return card->host->ios.timing == MMC_TIMING_SD_HS ||
@@ -513,4 +501,18 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
return card->host->ios.timing == MMC_TIMING_MMC_HS400;
}
+void mmc_retune_timer_stop(struct mmc_host *host);
+
+static inline void mmc_retune_needed(struct mmc_host *host)
+{
+ if (host->can_retune)
+ host->need_retune = 1;
+}
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
#endif /* LINUX_MMC_HOST_H */
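
A sketch of how a host driver might drive the new re-tuning state, assuming hypothetical error and suspend paths: flag re-tuning when a CRC error hints that the sample point drifted, and stop the periodic timer before suspending.

static void example_on_crc_error(struct mmc_host *host)
{
	mmc_retune_needed(host);	/* no-op unless host->can_retune */
}

static int example_host_suspend(struct mmc_host *host)
{
	mmc_retune_timer_stop(host);	/* quiesce periodic re-tuning */
	return 0;
}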
diff --git a/kernel/include/linux/mmc/mmc.h b/kernel/include/linux/mmc/mmc.h
index 124f56211..15f2c4a0a 100644
--- a/kernel/include/linux/mmc/mmc.h
+++ b/kernel/include/linux/mmc/mmc.h
@@ -302,6 +302,7 @@ struct _mmc_csd {
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_STRUCTURE 194 /* RO */
#define EXT_CSD_CARD_TYPE 196 /* RO */
+#define EXT_CSD_DRIVER_STRENGTH 197 /* RO */
#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
#define EXT_CSD_PWR_CL_52_195 200 /* RO */
@@ -390,6 +391,7 @@ struct _mmc_csd {
#define EXT_CSD_TIMING_HS 1 /* High speed */
#define EXT_CSD_TIMING_HS200 2 /* HS200 */
#define EXT_CSD_TIMING_HS400 3 /* HS400 */
+#define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */
#define EXT_CSD_SEC_ER_EN BIT(0)
#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
@@ -441,4 +443,6 @@ struct _mmc_csd {
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+#define mmc_driver_type_mask(n) (1 << (n))
+
#endif /* LINUX_MMC_MMC_H */
diff --git a/kernel/include/linux/mmc/sdhci-pci-data.h b/kernel/include/linux/mmc/sdhci-pci-data.h
index 8959604a1..fda15b6d4 100644
--- a/kernel/include/linux/mmc/sdhci-pci-data.h
+++ b/kernel/include/linux/mmc/sdhci-pci-data.h
@@ -15,4 +15,6 @@ struct sdhci_pci_data {
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
+extern int sdhci_pci_spt_drive_strength;
+
#endif
diff --git a/kernel/include/linux/mmdebug.h b/kernel/include/linux/mmdebug.h
index 877ef226f..772362adf 100644
--- a/kernel/include/linux/mmdebug.h
+++ b/kernel/include/linux/mmdebug.h
@@ -1,6 +1,7 @@
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
+#include <linux/bug.h>
#include <linux/stringify.h>
struct page;
diff --git a/kernel/include/linux/mmiotrace.h b/kernel/include/linux/mmiotrace.h
index c5d52780d..3ba327af0 100644
--- a/kernel/include/linux/mmiotrace.h
+++ b/kernel/include/linux/mmiotrace.h
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
extern void disable_mmiotrace(void);
extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);
-extern int mmio_trace_printk(const char *fmt, va_list args);
+extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
#endif /* _LINUX_MMIOTRACE_H */
diff --git a/kernel/include/linux/mmu_notifier.h b/kernel/include/linux/mmu_notifier.h
index 95243d28a..a1a210d59 100644
--- a/kernel/include/linux/mmu_notifier.h
+++ b/kernel/include/linux/mmu_notifier.h
@@ -66,6 +66,16 @@ struct mmu_notifier_ops {
unsigned long end);
/*
+ * clear_young is a lightweight version of clear_flush_young. Like the
+ * latter, it is supposed to test-and-clear the young/accessed bitflag
+ * in the secondary pte, but it may omit flushing the secondary tlb.
+ */
+ int (*clear_young)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+
+ /*
* test_young is called to check the young/accessed bitflag in
* the secondary pte. This is used to know if the page is
* frequently used without actually clearing the flag or tearing
@@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end);
+extern int __mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
+static inline int mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_clear_young(mm, start, end);
+ return 0;
+}
+
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
@@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__young; \
})
+#define ptep_clear_young_notify(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
+ __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
+ ___address + PAGE_SIZE); \
+ __young; \
+})
+
+#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
+ __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
+ ___address + PMD_SIZE); \
+ __young; \
+})
+
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
unsigned long ___addr = __address & PAGE_MASK; \
@@ -324,25 +368,25 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pte; \
})
-#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
struct mm_struct *___mm = (__vma)->vm_mm; \
pmd_t ___pmd; \
\
- ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
mmu_notifier_invalidate_range(___mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
___pmd; \
})
-#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
pmd_t ___pmd; \
\
- ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
mmu_notifier_invalidate_range(__mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
@@ -427,9 +471,11 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_young_notify ptep_test_and_clear_young
+#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
-#define pmdp_clear_flush_notify pmdp_clear_flush
-#define pmdp_get_and_clear_notify pmdp_get_and_clear
+#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
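
A hypothetical secondary-MMU implementation of the new flush-less hook: it ages pages like clear_flush_young, but may skip the secondary TLB flush, which is what makes it cheaper.

static int example_clear_young(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	int young = 0;

	/* Walk the driver's shadow page tables for [start, end),
	 * test-and-clear accessed bits, OR old values into young;
	 * no secondary TLB flush is required here. */
	return young;
}

static const struct mmu_notifier_ops example_mn_ops = {
	.clear_young = example_clear_young,
};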
diff --git a/kernel/include/linux/mmzone.h b/kernel/include/linux/mmzone.h
index 54d74f6eb..e23a9e704 100644
--- a/kernel/include/linux/mmzone.h
+++ b/kernel/include/linux/mmzone.h
@@ -37,10 +37,10 @@
enum {
MIGRATE_UNMOVABLE,
- MIGRATE_RECLAIMABLE,
MIGRATE_MOVABLE,
+ MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -319,7 +319,11 @@ enum zone_type {
ZONE_HIGHMEM,
#endif
ZONE_MOVABLE,
+#ifdef CONFIG_ZONE_DEVICE
+ ZONE_DEVICE,
+#endif
__MAX_NR_ZONES
+
};
#ifndef __GENERATING_BOUNDS_H
@@ -330,13 +334,16 @@ struct zone {
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long watermark[NR_WMARK];
+ unsigned long nr_reserved_highatomic;
+
/*
- * We don't know if the memory that we're going to allocate will be freeable
- * or/and it will be released eventually, so to avoid totally wasting several
- * GB of ram we must reserve some of the lower zone memory (otherwise we risk
- * to run OOM on the lower zones despite there's tons of freeable ram
- * on the higher zones). This array is recalculated at runtime if the
- * sysctl_lowmem_reserve_ratio sysctl changes.
+ * We don't know if the memory that we're going to allocate will be
+ * freeable or/and it will be released eventually, so to avoid totally
+ * wasting several GB of ram we must reserve some of the lower zone
+ * memory (otherwise we risk to run OOM on the lower zones despite
+ * there being tons of freeable ram on the higher zones). This array is
+ * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
+ * changes.
*/
long lowmem_reserve[MAX_NR_ZONES];
@@ -425,12 +432,6 @@ struct zone {
const char *name;
- /*
- * Number of MIGRATE_RESERVE page block. To maintain for just
- * optimization. Protected by zone->lock.
- */
- int nr_migrate_reserve_block;
-
#ifdef CONFIG_MEMORY_ISOLATION
/*
* Number of isolated pageblock. It is used to solve incorrect
@@ -585,75 +586,8 @@ static inline bool zone_is_empty(struct zone *zone)
* [1] : No fallback (__GFP_THISNODE)
*/
#define MAX_ZONELISTS 2
-
-
-/*
- * We cache key information from each zonelist for smaller cache
- * footprint when scanning for free pages in get_page_from_freelist().
- *
- * 1) The BITMAP fullzones tracks which zones in a zonelist have come
- * up short of free memory since the last time (last_fullzone_zap)
- * we zero'd fullzones.
- * 2) The array z_to_n[] maps each zone in the zonelist to its node
- * id, so that we can efficiently evaluate whether that node is
- * set in the current tasks mems_allowed.
- *
- * Both fullzones and z_to_n[] are one-to-one with the zonelist,
- * indexed by a zones offset in the zonelist zones[] array.
- *
- * The get_page_from_freelist() routine does two scans. During the
- * first scan, we skip zones whose corresponding bit in 'fullzones'
- * is set or whose corresponding node in current->mems_allowed (which
- * comes from cpusets) is not set. During the second scan, we bypass
- * this zonelist_cache, to ensure we look methodically at each zone.
- *
- * Once per second, we zero out (zap) fullzones, forcing us to
- * reconsider nodes that might have regained more free memory.
- * The field last_full_zap is the time we last zapped fullzones.
- *
- * This mechanism reduces the amount of time we waste repeatedly
- * reexaming zones for free memory when they just came up low on
- * memory momentarilly ago.
- *
- * The zonelist_cache struct members logically belong in struct
- * zonelist. However, the mempolicy zonelists constructed for
- * MPOL_BIND are intentionally variable length (and usually much
- * shorter). A general purpose mechanism for handling structs with
- * multiple variable length members is more mechanism than we want
- * here. We resort to some special case hackery instead.
- *
- * The MPOL_BIND zonelists don't need this zonelist_cache (in good
- * part because they are shorter), so we put the fixed length stuff
- * at the front of the zonelist struct, ending in a variable length
- * zones[], as is needed by MPOL_BIND.
- *
- * Then we put the optional zonelist cache on the end of the zonelist
- * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
- * the fixed length portion at the front of the struct. This pointer
- * both enables us to find the zonelist cache, and in the case of
- * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
- * to know that the zonelist cache is not there.
- *
- * The end result is that struct zonelists come in two flavors:
- * 1) The full, fixed length version, shown below, and
- * 2) The custom zonelists for MPOL_BIND.
- * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
- *
- * Even though there may be multiple CPU cores on a node modifying
- * fullzones or last_full_zap in the same zonelist_cache at the same
- * time, we don't lock it. This is just hint data - if it is wrong now
- * and then, the allocator will still function, perhaps a bit slower.
- */
-
-
-struct zonelist_cache {
- unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */
- DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */
- unsigned long last_full_zap; /* when last zap'd (jiffies) */
-};
#else
#define MAX_ZONELISTS 1
-struct zonelist_cache;
#endif
/*
@@ -671,9 +605,6 @@ struct zoneref {
* allocation, the other zones are fallback zones, in decreasing
* priority.
*
- * If zlcache_ptr is not NULL, then it is just the address of zlcache,
- * as explained above. If zlcache_ptr is NULL, there is no zlcache.
- * *
* To speed the reading of the zonelist, the zonerefs contain the zone index
* of the entry being read. Helper functions to access information given
* a struct zoneref are
@@ -683,21 +614,9 @@ struct zoneref {
* zonelist_node_idx() - Return the index of the node for an entry
*/
struct zonelist {
- struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
-#ifdef CONFIG_NUMA
- struct zonelist_cache zlcache; // optional ...
-#endif
};
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-struct node_active_region {
- unsigned long start_pfn;
- unsigned long end_pfn;
- int nid;
-};
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
@@ -762,6 +681,14 @@ typedef struct pglist_data {
/* Number of pages migrated during the rate limiting time interval */
unsigned long numabalancing_migrate_nr_pages;
#endif
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ /*
+ * If memory initialisation on large machines is deferred then this
+ * is the first PFN that needs to be initialised.
+ */
+ unsigned long first_deferred_pfn;
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -786,6 +713,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
+static inline int zone_id(const struct zone *zone)
+{
+ struct pglist_data *pgdat = zone->zone_pgdat;
+
+ return zone - pgdat->node_zones;
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return zone_id(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return false;
+}
+#endif
+
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
@@ -794,14 +740,13 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx, int alloc_flags);
+ unsigned long mark, int classzone_idx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long size,
- enum memmap_context context);
+ unsigned long size);
extern void lruvec_init(struct lruvec *lruvec);
@@ -1216,11 +1161,16 @@ void sparse_init(void);
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool early_pfn_in_nid(unsigned long pfn, int nid);
-#else
-#define early_pfn_in_nid(pfn, nid) (1)
-#endif
+/*
+ * During memory init memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * may treat start/end as pfns or sections.
+ */
+struct mminit_pfnnid_cache {
+ unsigned long last_start;
+ unsigned long last_end;
+ int last_nid;
+};
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
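
A sketch of the fast path the cache enables, assuming the field semantics documented above; on a miss the caller performs the full memblock search and refills the cache:

static int cached_pfn_to_nid(unsigned long pfn,
			     struct mminit_pfnnid_cache *state)
{
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;		/* hit: reuse last range */
	return -1;				/* miss: search and refill */
}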
diff --git a/kernel/include/linux/mod_devicetable.h b/kernel/include/linux/mod_devicetable.h
index 3bfd56778..64f36e09a 100644
--- a/kernel/include/linux/mod_devicetable.h
+++ b/kernel/include/linux/mod_devicetable.h
@@ -189,6 +189,8 @@ struct css_device_id {
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
+ __u32 cls;
+ __u32 cls_msk;
};
#define PNP_ID_LEN 8
@@ -217,6 +219,14 @@ struct serio_device_id {
__u8 proto;
};
+struct hda_device_id {
+ __u32 vendor_id;
+ __u32 rev_id;
+ __u8 api_version;
+ const char *name;
+ unsigned long driver_data;
+};
+
/*
* Struct used for matching a device
*/
@@ -251,7 +261,7 @@ struct pcmcia_device_id {
__u32 prod_id_hash[4];
- /* not matched against in kernelspace*/
+ /* not matched against in kernelspace */
const char * prod_id[4];
/* not matched against */
@@ -599,9 +609,21 @@ struct ipack_device_id {
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
+#define MEI_CL_VERSION_ANY 0xff
+/**
+ * struct mei_cl_device_id - MEI client device identifier
+ * @name: helper name
+ * @uuid: client uuid
+ * @version: client protocol version
+ * @driver_info: information used by the driver.
+ *
+ * identifies mei client device by uuid and name
+ */
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
+ uuid_le uuid;
+ __u8 version;
kernel_ulong_t driver_info;
};
@@ -629,4 +651,10 @@ struct mcb_device_id {
kernel_ulong_t driver_data;
};
+struct ulpi_device_id {
+ __u16 vendor;
+ __u16 product;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/kernel/include/linux/module.h b/kernel/include/linux/module.h
index c883b86ea..b229a9961 100644
--- a/kernel/include/linux/module.h
+++ b/kernel/include/linux/module.h
@@ -11,12 +11,14 @@
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
+#include <linux/init.h>
#include <linux/elf.h>
#include <linux/stringify.h>
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/jump_label.h>
#include <linux/export.h>
+#include <linux/rbtree_latch.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -70,6 +72,89 @@ extern struct module_attribute module_uevent;
extern int init_module(void);
extern void cleanup_module(void);
+#ifndef MODULE
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module). There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module. If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, while the level
+ * does not matter when it is built as a loadable module,
+ * such as bus-snooping debug drivers.
+ */
+#define early_initcall(fn) module_init(fn)
+#define core_initcall(fn) module_init(fn)
+#define core_initcall_sync(fn) module_init(fn)
+#define postcore_initcall(fn) module_init(fn)
+#define postcore_initcall_sync(fn) module_init(fn)
+#define arch_initcall(fn) module_init(fn)
+#define subsys_initcall(fn) module_init(fn)
+#define subsys_initcall_sync(fn) module_init(fn)
+#define fs_initcall(fn) module_init(fn)
+#define fs_initcall_sync(fn) module_init(fn)
+#define rootfs_initcall(fn) module_init(fn)
+#define device_initcall(fn) module_init(fn)
+#define device_initcall_sync(fn) module_init(fn)
+#define late_initcall(fn) module_init(fn)
+#define late_initcall_sync(fn) module_init(fn)
+
+#define console_initcall(fn) module_init(fn)
+#define security_initcall(fn) module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn) \
+ static inline initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn) \
+ static inline exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#endif
+
+/* This means "can be init if no module support, otherwise module load
+ may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE .text
+#define __INITDATA_OR_MODULE .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
+#endif /*CONFIG_MODULES*/
+
/* Archs provide a method of finding the correct exception table. */
struct exception_table_entry;
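
The canonical consumer of these macros, for reference: built in, example_init() runs from do_initcalls(); as a module it runs at insmod, and example_exit() at rmmod.

static int __init example_init(void)
{
	pr_info("example loaded\n");
	return 0;
}

static void __exit example_exit(void)
{
	pr_info("example unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");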
@@ -210,6 +295,19 @@ enum module_state {
MODULE_STATE_UNFORMED, /* Still setting it up. */
};
+struct module;
+
+struct mod_tree_node {
+ struct module *mod;
+ struct latch_tree_node node;
+};
+
+struct mod_kallsyms {
+ Elf_Sym *symtab;
+ unsigned int num_symtab;
+ char *strtab;
+};
+
struct module {
enum module_state state;
@@ -232,6 +330,9 @@ struct module {
unsigned int num_syms;
/* Kernel parameters. */
+#ifdef CONFIG_SYSFS
+ struct mutex param_lock;
+#endif
struct kernel_param *kp;
unsigned int num_kp;
@@ -257,6 +358,8 @@ struct module {
bool sig_ok;
#endif
+ bool async_probe_requested;
+
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
const unsigned long *gpl_future_crcs;
@@ -269,8 +372,15 @@ struct module {
/* Startup function. */
int (*init)(void);
- /* If this is non-NULL, vfree after init() returns */
- void *module_init;
+ /*
+ * If this is non-NULL, vfree() after init() returns.
+ *
+ * Cacheline align here, such that:
+ * module_init, module_core, init_size, core_size,
+ * init_text_size, core_text_size and mtn_core::{mod,node[0]}
+ * are on the same cacheline.
+ */
+ void *module_init ____cacheline_aligned;
/* Here is the actual code + data, vfree'd on unload. */
void *module_core;
@@ -281,6 +391,16 @@ struct module {
/* The size of the executable code in each section. */
unsigned int init_text_size, core_text_size;
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+ /*
+ * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
+ * above entries such that a regular lookup will only touch one
+ * cacheline.
+ */
+ struct mod_tree_node mtn_core;
+ struct mod_tree_node mtn_init;
+#endif
+
/* Size of RO sections of the module (text+rodata) */
unsigned int init_ro_size, core_ro_size;
@@ -297,14 +417,9 @@ struct module {
#endif
#ifdef CONFIG_KALLSYMS
- /*
- * We keep the symbol and string tables for kallsyms.
- * The core_* fields below are temporary, loader-only (they
- * could really be discarded after module init).
- */
- Elf_Sym *symtab, *core_symtab;
- unsigned int num_symtab, core_num_syms;
- char *strtab, *core_strtab;
+ /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+ struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms core_kallsyms;
/* Section attributes */
struct module_sect_attrs *sect_attrs;
@@ -336,7 +451,7 @@ struct module {
const char **trace_bprintk_fmt_start;
#endif
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call **trace_events;
+ struct trace_event_call **trace_events;
unsigned int num_trace_events;
struct trace_enum_map **trace_enums;
unsigned int num_trace_enums;
@@ -367,7 +482,7 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
-};
+} ____cacheline_aligned;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
@@ -421,14 +536,22 @@ struct symsearch {
bool unused;
};
-/* Search for an exported symbol by name. */
+/*
+ * Search for an exported symbol by name.
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
bool gplok,
bool warn);
-/* Walk the exported symbol table */
+/*
+ * Walk the exported symbol table
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
struct module *owner,
void *data), void *data);
@@ -508,6 +631,11 @@ int unregister_module_notifier(struct notifier_block *nb);
extern void print_modules(void);
+static inline bool module_requested_async_probing(struct module *module)
+{
+ return module && module->async_probe_requested;
+}
+
#else /* !CONFIG_MODULES... */
/* Given an address, look for it in the exception tables. */
@@ -618,6 +746,12 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
static inline void print_modules(void)
{
}
+
+static inline bool module_requested_async_probing(struct module *module)
+{
+ return false;
+}
+
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
@@ -655,4 +789,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_MODULE_SIG
+static inline bool module_sig_ok(struct module *module)
+{
+ return module->sig_ok;
+}
+#else /* !CONFIG_MODULE_SIG */
+static inline bool module_sig_ok(struct module *module)
+{
+ return true;
+}
+#endif /* CONFIG_MODULE_SIG */
+
#endif /* _LINUX_MODULE_H */
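
A hedged sketch of a caller consulting the two helpers added above (module_sig_ok() and module_requested_async_probing()); the function and its error policy are hypothetical:

static int example_inspect(struct module *mod)
{
        /* Always true unless CONFIG_MODULE_SIG saw a bad signature. */
        if (!module_sig_ok(mod))
                return -EKEYREJECTED;

        /* Safe on NULL: the helper checks the pointer itself. */
        if (module_requested_async_probing(mod))
                pr_debug("%s: async probing requested\n", mod->name);

        return 0;
}
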
diff --git a/kernel/include/linux/moduleparam.h b/kernel/include/linux/moduleparam.h
index 1c9effa25..52666d90c 100644
--- a/kernel/include/linux/moduleparam.h
+++ b/kernel/include/linux/moduleparam.h
@@ -67,8 +67,9 @@ enum {
struct kernel_param {
const char *name;
+ struct module *mod;
const struct kernel_param_ops *ops;
- u16 perm;
+ const u16 perm;
s8 level;
u8 flags;
union {
@@ -108,7 +109,7 @@ struct kparam_array
*
* @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
- * is writable, you may need to use kparam_block_sysfs_write() around
+ * is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
*
* The @type is simply pasted to refer to a param_ops_##type and a
@@ -216,16 +217,16 @@ struct kparam_array
parameters. */
#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
/* Default value instead of permissions? */ \
- static const char __param_str_##name[] = prefix #name; \
+ static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
- = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \
- level, flags, { arg } }
+ = { __param_str_##name, THIS_MODULE, ops, \
+ VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
/* Obsolete - use module_param_cb() */
#define module_param_call(name, set, get, arg, perm) \
- static struct kernel_param_ops __param_ops_##name = \
+ static const struct kernel_param_ops __param_ops_##name = \
{ .flags = 0, (void *)set, (void *)get }; \
__module_param_call(MODULE_PARAM_PREFIX, \
name, &__param_ops_##name, arg, \
@@ -238,58 +239,14 @@ __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
return 0;
}
-/**
- * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
- * @name: the name of the parameter
- *
- * There's no point blocking write on a paramter that isn't writable via sysfs!
- */
-#define kparam_block_sysfs_write(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0222)); \
- __kernel_param_lock(); \
- } while (0)
-
-/**
- * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_write(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0222)); \
- __kernel_param_unlock(); \
- } while (0)
-
-/**
- * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
- * @name: the name of the parameter
- *
- * This also blocks sysfs writes.
- */
-#define kparam_block_sysfs_read(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0444)); \
- __kernel_param_lock(); \
- } while (0)
-
-/**
- * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_read(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0444)); \
- __kernel_param_unlock(); \
- } while (0)
-
#ifdef CONFIG_SYSFS
-extern void __kernel_param_lock(void);
-extern void __kernel_param_unlock(void);
+extern void kernel_param_lock(struct module *mod);
+extern void kernel_param_unlock(struct module *mod);
#else
-static inline void __kernel_param_lock(void)
+static inline void kernel_param_lock(struct module *mod)
{
}
-static inline void __kernel_param_unlock(void)
+static inline void kernel_param_unlock(struct module *mod)
{
}
#endif
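
With the per-name kparam_block_sysfs_*() wrappers removed, writable-parameter users now take the per-module lock directly; a sketch assuming a hypothetical charp parameter:

static char *example_path;              /* hypothetical charp parameter */
module_param(example_path, charp, 0644);

static void example_use(void)
{
        kernel_param_lock(THIS_MODULE); /* charp may be kfreed on sysfs write */
        if (example_path)
                pr_info("path=%s\n", example_path);
        kernel_param_unlock(THIS_MODULE);
}
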
@@ -310,6 +267,15 @@ static inline void __kernel_param_unlock(void)
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
__module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
+
+/**
+ * core_param_unsafe - same as core_param but taints kernel
+ */
+#define core_param_unsafe(name, var, type, perm) \
+ param_check_##type(name, &(var)); \
+ __module_param_call("", name, &param_ops_##type, &var, perm, \
+ -1, KERNEL_PARAM_FL_UNSAFE)
+
#endif /* !MODULE */
/**
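
A sketch of the new core_param_unsafe() macro for a built-in parameter; the variable is hypothetical:

/* Built-in kernels only: setting this on the command line taints. */
static bool example_force;
core_param_unsafe(example_force, example_force, bool, 0444);
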
@@ -357,8 +323,9 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
+ void *arg,
int (*unknown)(char *param, char *val,
- const char *doing));
+ const char *doing, void *arg));
/* Called by module remove. */
#ifdef CONFIG_SYSFS
@@ -376,64 +343,71 @@ static inline void destroy_params(const struct kernel_param *params,
#define __param_check(name, p, type) \
static inline type __always_unused *__check_##name(void) { return(p); }
-extern struct kernel_param_ops param_ops_byte;
+extern const struct kernel_param_ops param_ops_byte;
extern int param_set_byte(const char *val, const struct kernel_param *kp);
extern int param_get_byte(char *buffer, const struct kernel_param *kp);
#define param_check_byte(name, p) __param_check(name, p, unsigned char)
-extern struct kernel_param_ops param_ops_short;
+extern const struct kernel_param_ops param_ops_short;
extern int param_set_short(const char *val, const struct kernel_param *kp);
extern int param_get_short(char *buffer, const struct kernel_param *kp);
#define param_check_short(name, p) __param_check(name, p, short)
-extern struct kernel_param_ops param_ops_ushort;
+extern const struct kernel_param_ops param_ops_ushort;
extern int param_set_ushort(const char *val, const struct kernel_param *kp);
extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
-extern struct kernel_param_ops param_ops_int;
+extern const struct kernel_param_ops param_ops_int;
extern int param_set_int(const char *val, const struct kernel_param *kp);
extern int param_get_int(char *buffer, const struct kernel_param *kp);
#define param_check_int(name, p) __param_check(name, p, int)
-extern struct kernel_param_ops param_ops_uint;
+extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
-extern struct kernel_param_ops param_ops_long;
+extern const struct kernel_param_ops param_ops_long;
extern int param_set_long(const char *val, const struct kernel_param *kp);
extern int param_get_long(char *buffer, const struct kernel_param *kp);
#define param_check_long(name, p) __param_check(name, p, long)
-extern struct kernel_param_ops param_ops_ulong;
+extern const struct kernel_param_ops param_ops_ulong;
extern int param_set_ulong(const char *val, const struct kernel_param *kp);
extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
-extern struct kernel_param_ops param_ops_ullong;
+extern const struct kernel_param_ops param_ops_ullong;
extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
-extern struct kernel_param_ops param_ops_charp;
+extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
+extern void param_free_charp(void *arg);
#define param_check_charp(name, p) __param_check(name, p, char *)
/* We used to allow int as well as bool. We're taking that away! */
-extern struct kernel_param_ops param_ops_bool;
+extern const struct kernel_param_ops param_ops_bool;
extern int param_set_bool(const char *val, const struct kernel_param *kp);
extern int param_get_bool(char *buffer, const struct kernel_param *kp);
#define param_check_bool(name, p) __param_check(name, p, bool)
-extern struct kernel_param_ops param_ops_invbool;
+extern const struct kernel_param_ops param_ops_bool_enable_only;
+extern int param_set_bool_enable_only(const char *val,
+ const struct kernel_param *kp);
+/* getter is the same as for the regular bool */
+#define param_check_bool_enable_only param_check_bool
+
+extern const struct kernel_param_ops param_ops_invbool;
extern int param_set_invbool(const char *val, const struct kernel_param *kp);
extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
#define param_check_invbool(name, p) __param_check(name, p, bool)
/* An int, which can only be set like a bool (though it shows as an int). */
-extern struct kernel_param_ops param_ops_bint;
+extern const struct kernel_param_ops param_ops_bint;
extern int param_set_bint(const char *val, const struct kernel_param *kp);
#define param_get_bint param_get_int
#define param_check_bint param_check_int
@@ -477,9 +451,9 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
perm, -1, 0); \
__MODULE_PARM_TYPE(name, "array of " #type)
-extern struct kernel_param_ops param_array_ops;
+extern const struct kernel_param_ops param_array_ops;
-extern struct kernel_param_ops param_ops_string;
+extern const struct kernel_param_ops param_ops_string;
extern int param_set_copystring(const char *val, const struct kernel_param *);
extern int param_get_string(char *buffer, const struct kernel_param *kp);
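
Since the ops tables are now const, custom parameter callbacks are declared to match; a sketch with hypothetical names:

static int example_set(const char *val, const struct kernel_param *kp)
{
        /* Validate here if needed, then delegate to the stock int setter. */
        return param_set_int(val, kp);
}

static const struct kernel_param_ops example_ops = {
        .set = example_set,
        .get = param_get_int,
};

static int example_value;
module_param_cb(example_value, &example_ops, &example_value, 0644);
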
diff --git a/kernel/include/linux/mpi.h b/kernel/include/linux/mpi.h
index 5af1b81de..3a5abe95a 100644
--- a/kernel/include/linux/mpi.h
+++ b/kernel/include/linux/mpi.h
@@ -31,12 +31,7 @@
#define G10_MPI_H
#include <linux/types.h>
-
-/* DSI defines */
-
-#define SHA1_DIGEST_LENGTH 20
-
-/*end of DSI defines */
+#include <linux/scatterlist.h>
#define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8)
#define BITS_PER_MPI_LIMB BITS_PER_LONG
@@ -78,11 +73,16 @@ void mpi_swap(MPI a, MPI b);
MPI do_encode_md(const void *sha_buffer, unsigned nbits);
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
int mpi_fromstr(MPI val, const char *str);
u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
+int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ int *sign);
void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
+int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes,
+ int *sign);
#define log_mpidump g10_log_mpidump
@@ -142,4 +142,17 @@ int mpi_rshift(MPI x, MPI a, unsigned n);
/*-- mpi-inv.c --*/
int mpi_invm(MPI x, MPI u, MPI v);
+/* inline functions */
+
+/**
+ * mpi_get_size() - returns max size required to store the number
+ *
+ * @a: A multi precision integer for which we want to allocate a buffer
+ *
+ * Return: size required to store the number
+ */
+static inline unsigned int mpi_get_size(MPI a)
+{
+ return a->nlimbs * BYTES_PER_MPI_LIMB;
+}
#endif /*G10_MPI_H */
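
A sketch pairing the new mpi_get_size() with mpi_read_buffer(); the function name and error policy are hypothetical:

static int example_mpi_to_buf(MPI a, u8 *buf, unsigned buf_len)
{
        unsigned nbytes;
        int sign;

        if (mpi_get_size(a) > buf_len)
                return -EINVAL;
        /* Fills buf and reports the actual byte count and sign. */
        return mpi_read_buffer(a, buf, buf_len, &nbytes, &sign);
}
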
diff --git a/kernel/include/linux/mpls_iptunnel.h b/kernel/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000..ef29eb2d6
--- /dev/null
+++ b/kernel/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/kernel/include/linux/msi.h b/kernel/include/linux/msi.h
index 8ac4a68ff..f71a25e5f 100644
--- a/kernel/include/linux/msi.h
+++ b/kernel/include/linux/msi.h
@@ -14,38 +14,85 @@ extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
+struct pci_dev;
+struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
+ struct msi_msg *msg);
+
+/**
+ * platform_msi_desc - Platform device specific msi descriptor data
+ * @msi_priv_data: Pointer to platform private data
+ * @msi_index: The index of the MSI descriptor for multi MSI
+ */
+struct platform_msi_desc {
+ struct platform_msi_priv_data *msi_priv_data;
+ u16 msi_index;
+};
+
+/**
+ * struct msi_desc - Descriptor structure for MSI based interrupts
+ * @list: List head for management
+ * @irq: The base interrupt number
+ * @nvec_used: The number of vectors used
+ * @dev: Pointer to the device which uses this descriptor
+ * @msg: The last set MSI message cached for reuse
+ *
+ * @masked: [PCI MSI/X] Mask bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
+ * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
+ * @platform: [platform] Platform device specific msi descriptor data
+ */
struct msi_desc {
- struct {
- __u8 is_msix : 1;
- __u8 multiple: 3; /* log2 num of messages allocated */
- __u8 multi_cap : 3; /* log2 num of messages supported */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u16 entry_nr; /* specific enabled entry */
- unsigned default_irq; /* default pre-assigned irq */
- } msi_attrib;
-
- u32 masked; /* mask bits */
- unsigned int irq;
- unsigned int nvec_used; /* number of messages */
- struct list_head list;
+ /* Shared device/bus type independent data */
+ struct list_head list;
+ unsigned int irq;
+ unsigned int nvec_used;
+ struct device *dev;
+ struct msi_msg msg;
union {
- void __iomem *mask_base;
- u8 mask_pos;
- };
- struct pci_dev *dev;
+ /* PCI MSI/X specific data */
+ struct {
+ u32 masked;
+ struct {
+ __u8 is_msix : 1;
+ __u8 multiple : 3;
+ __u8 multi_cap : 3;
+ __u8 maskbit : 1;
+ __u8 is_64 : 1;
+ __u16 entry_nr;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
+ };
- /* Last set MSI message */
- struct msi_msg msg;
+ /*
+ * Non-PCI variants add their data structure here. New
+ * entries need to use a named structure. We want
+ * proper namespaces for this. The PCI part is
+ * anonymous for now as it would require an immediate
+ * tree wide cleanup.
+ */
+ struct platform_msi_desc platform;
+ };
};
/* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
-#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define msi_desc_to_dev(desc) ((desc)->dev)
+#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
@@ -56,12 +103,17 @@ struct msi_desc {
#define for_each_pci_msi_entry(desc, pdev) \
for_each_msi_entry((desc), &(pdev)->dev)
-static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
- return desc->dev;
+ return NULL;
}
#endif /* CONFIG_PCI_MSI */
+struct msi_desc *alloc_msi_entry(struct device *dev);
+void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -108,12 +160,11 @@ struct msi_controller {
struct device *dev;
struct device_node *of_node;
struct list_head list;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *domain;
-#endif
int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
+ int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
+ int nvec, int type);
void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
@@ -125,6 +176,7 @@ struct msi_controller {
struct irq_domain;
struct irq_chip;
struct device_node;
+struct fwnode_handle;
struct msi_domain_info;
/**
@@ -213,7 +265,7 @@ enum {
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force);
-struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
+struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
@@ -221,23 +273,36 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg);
+void platform_msi_domain_free_irqs(struct device *dev);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
-struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
int nvec, int type);
void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
-struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info, struct irq_domain *parent);
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
struct msi_domain_info *info, struct device *dev);
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+#else
+static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+ return NULL;
+}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
#endif /* LINUX_MSI_H */
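
A sketch of the new platform-MSI allocation flow; the device callback, vector count, and function names are hypothetical:

static void example_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        /* Program msg->address_lo/address_hi/data into device registers. */
}

static int example_setup_msi(struct device *dev)
{
        int ret;

        ret = platform_msi_domain_alloc_irqs(dev, 4, example_write_msg);
        if (ret)
                return ret;
        /* request_irq() each desc->irq via for_each_msi_entry(). */
        return 0;
}
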
diff --git a/kernel/include/linux/msm_mdp.h b/kernel/include/linux/msm_mdp.h
deleted file mode 100644
index fe722c1fb..000000000
--- a/kernel/include/linux/msm_mdp.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* include/linux/msm_mdp.h
- *
- * Copyright (C) 2007 Google Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef _MSM_MDP_H_
-#define _MSM_MDP_H_
-
-#include <linux/types.h>
-
-#define MSMFB_IOCTL_MAGIC 'm'
-#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
-#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
-
-enum {
- MDP_RGB_565, /* RGB 565 planar */
- MDP_XRGB_8888, /* RGB 888 padded */
- MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
- MDP_ARGB_8888, /* ARGB 888 */
- MDP_RGB_888, /* RGB 888 planar */
- MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
- MDP_YCRYCB_H2V1, /* YCrYCb interleave */
- MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
- MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
- MDP_RGBA_8888, /* ARGB 888 */
- MDP_BGRA_8888, /* ABGR 888 */
- MDP_RGBX_8888, /* RGBX 888 */
- MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */
-};
-
-enum {
- PMEM_IMG,
- FB_IMG,
-};
-
-/* flag values */
-#define MDP_ROT_NOP 0
-#define MDP_FLIP_LR 0x1
-#define MDP_FLIP_UD 0x2
-#define MDP_ROT_90 0x4
-#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
-#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
-#define MDP_DITHER 0x8
-#define MDP_BLUR 0x10
-
-#define MDP_TRANSP_NOP 0xffffffff
-#define MDP_ALPHA_NOP 0xff
-
-struct mdp_rect {
- u32 x, y, w, h;
-};
-
-struct mdp_img {
- u32 width, height, format, offset;
- int memory_id; /* the file descriptor */
-};
-
-struct mdp_blit_req {
- struct mdp_img src;
- struct mdp_img dst;
- struct mdp_rect src_rect;
- struct mdp_rect dst_rect;
- u32 alpha, transp_mask, flags;
-};
-
-struct mdp_blit_req_list {
- u32 count;
- struct mdp_blit_req req[];
-};
-
-#endif /* _MSM_MDP_H_ */
diff --git a/kernel/include/linux/mtd/cfi.h b/kernel/include/linux/mtd/cfi.h
index 299d7d31f..9b57a9b1b 100644
--- a/kernel/include/linux/mtd/cfi.h
+++ b/kernel/include/linux/mtd/cfi.h
@@ -296,183 +296,19 @@ struct cfi_private {
struct flchip chips[0]; /* per-chip data structure for each chip */
};
-/*
- * Returns the command address according to the given geometry.
- */
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
- struct map_info *map, struct cfi_private *cfi)
-{
- unsigned bankwidth = map_bankwidth(map);
- unsigned interleave = cfi_interleave(cfi);
- unsigned type = cfi->device_type;
- uint32_t addr;
-
- addr = (cmd_ofs * type) * interleave;
-
- /* Modify the unlock address if we are in compatibility mode.
- * For 16bit devices on 8 bit busses
- * and 32bit devices on 16 bit busses
- * set the low bit of the alternating bit sequence of the address.
- */
- if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
- addr |= (type >> 1)*interleave;
-
- return addr;
-}
-
-/*
- * Transforms the CFI command for the given geometry (bus width & interleave).
- * It looks too long to be inline, but in the common case it should almost all
- * get optimised away.
- */
-static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
-{
- map_word val = { {0} };
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onecmd;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- /* First, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- default: BUG();
- case 1:
- onecmd = cmd;
- break;
- case 2:
- onecmd = cpu_to_cfi16(map, cmd);
- break;
- case 4:
- onecmd = cpu_to_cfi32(map, cmd);
- break;
- }
-
- /* Now replicate it across the size of an unsigned long, or
- just to the bus width as appropriate */
- switch (chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- onecmd |= (onecmd << (chip_mode * 32));
-#endif
- case 4:
- onecmd |= (onecmd << (chip_mode * 16));
- case 2:
- onecmd |= (onecmd << (chip_mode * 8));
- case 1:
- ;
- }
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi);
- /* And finally, for the multi-word case, replicate it
- in all words in the structure */
- for (i=0; i < words_per_bus; i++) {
- val.x[i] = onecmd;
- }
-
- return val;
-}
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
#define CMD(x) cfi_build_cmd((x), map, cfi)
-
-static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
- struct cfi_private *cfi)
-{
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onestat, res = 0;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- onestat = val.x[0];
- /* Or all status words together */
- for (i=1; i < words_per_bus; i++) {
- onestat |= val.x[i];
- }
-
- res = onestat;
- switch(chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- res |= (onestat >> (chip_mode * 32));
-#endif
- case 4:
- res |= (onestat >> (chip_mode * 16));
- case 2:
- res |= (onestat >> (chip_mode * 8));
- case 1:
- ;
- }
-
- /* Last, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- case 1:
- break;
- case 2:
- res = cfi16_to_cpu(map, res);
- break;
- case 4:
- res = cfi32_to_cpu(map, res);
- break;
- default: BUG();
- }
- return res;
-}
-
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi);
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
-
-/*
- * Sends a CFI command to a bank of flash for the given geometry.
- *
- * Returns the offset in flash where the command was written.
- * If prev_val is non-null, it will be set to the value at the command address,
- * before the command was written.
- */
-static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, map_word *prev_val)
-{
- map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
- val = cfi_build_cmd(cmd, map, cfi);
-
- if (prev_val)
- *prev_val = map_read(map, addr);
-
- map_write(map, val, addr);
-
- return addr - base;
-}
+ int type, map_word *prev_val);
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
@@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
}
}
-static inline void cfi_udelay(int us)
-{
- if (us >= 1000) {
- msleep((us+999)/1000);
- } else {
- udelay(us);
- cond_resched();
- }
-}
+void cfi_udelay(int us);
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi);
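
The now out-of-line helpers keep their calling convention; a sketch of the classic AMD-style unlock sequence as chip drivers issue it (function name hypothetical):

static void example_amd_unlock(struct map_info *map, struct cfi_private *cfi,
                               struct flchip *chip)
{
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
}
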
diff --git a/kernel/include/linux/mtd/map.h b/kernel/include/linux/mtd/map.h
index 29975c73a..366cf7795 100644
--- a/kernel/include/linux/mtd/map.h
+++ b/kernel/include/linux/mtd/map.h
@@ -27,9 +27,9 @@
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
-#include <asm/io.h>
#include <asm/barrier.h>
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/kernel/include/linux/mtd/nand.h b/kernel/include/linux/mtd/nand.h
index 12b75f3ba..5a9d1d4c2 100644
--- a/kernel/include/linux/mtd/nand.h
+++ b/kernel/include/linux/mtd/nand.h
@@ -26,6 +26,8 @@
struct mtd_info;
struct nand_flash_dev;
+struct device_node;
+
/* Scan and identify a NAND device */
extern int nand_scan(struct mtd_info *mtd, int max_chips);
/*
@@ -502,16 +504,16 @@ struct nand_ecc_ctrl {
int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page);
int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required);
+ const uint8_t *buf, int oob_required, int page);
int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page);
int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf, int page);
int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
- const uint8_t *data_buf, int oob_required);
+ const uint8_t *data_buf, int oob_required, int page);
int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required);
+ const uint8_t *buf, int oob_required, int page);
int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
int page);
int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -542,6 +544,7 @@ struct nand_buffers {
* flash device
* @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
* flash device.
+ * @flash_node: [BOARDSPECIFIC] device node describing this instance
* @read_byte: [REPLACEABLE] read one byte from the chip
* @read_word: [REPLACEABLE] read one word from the chip
* @write_byte: [REPLACEABLE] write a single byte to the chip on the
@@ -553,10 +556,6 @@ struct nand_buffers {
* @block_markbad: [REPLACEABLE] mark a block bad
* @cmd_ctrl: [BOARDSPECIFIC] hardware-specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
- * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting
- * mtd->oobsize, mtd->writesize and so on.
- * @id_data contains the 8 bytes values of NAND_CMD_READID.
- * Return with the bus width.
* @dev_ready: [BOARDSPECIFIC] hardware-specific function for accessing
* device ready/busy line. If set to NULL no access to
* ready/busy is available and the ready/busy information
@@ -644,6 +643,8 @@ struct nand_chip {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ struct device_node *flash_node;
+
uint8_t (*read_byte)(struct mtd_info *mtd);
u16 (*read_word)(struct mtd_info *mtd);
void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
@@ -653,8 +654,6 @@ struct nand_chip {
int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- int (*init_size)(struct mtd_info *mtd, struct nand_chip *this,
- u8 *id_data);
int (*dev_ready)(struct mtd_info *mtd);
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
@@ -833,7 +832,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_default_bbt(struct mtd_info *mtd);
extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
@@ -1026,4 +1024,9 @@ struct nand_sdr_timings {
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
+
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int threshold);
#endif /* __LINUX_MTD_NAND_H */
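
A sketch of the new erased-page check on an uncorrectable ECC result; the threshold, buffers, and function name are hypothetical:

static int example_on_ecc_failure(struct mtd_info *mtd, void *data,
                                  void *ecc, int ecclen)
{
        /*
         * Returns the number of corrected bitflips if the chunk reads
         * as erased, or -EBADMSG for a genuine uncorrectable error.
         */
        return nand_check_erased_ecc_chunk(data, mtd->writesize,
                                           ecc, ecclen, NULL, 0, 4);
}
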
diff --git a/kernel/include/linux/mtd/spi-nor.h b/kernel/include/linux/mtd/spi-nor.h
index e5409524b..bc742dac7 100644
--- a/kernel/include/linux/mtd/spi-nor.h
+++ b/kernel/include/linux/mtd/spi-nor.h
@@ -10,6 +10,23 @@
#ifndef __LINUX_MTD_SPI_NOR_H
#define __LINUX_MTD_SPI_NOR_H
+#include <linux/bitops.h>
+#include <linux/mtd/cfi.h>
+
+/*
+ * Manufacturer IDs
+ *
+ * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
+ * Sometimes these are the same as CFI IDs, but sometimes they aren't.
+ */
+#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
+#define SNOR_MFR_INTEL CFI_MFR_INTEL
+#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
+#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
+#define SNOR_MFR_SPANSION CFI_MFR_AMD
+#define SNOR_MFR_SST CFI_MFR_SST
+#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
+
/*
* Note on opcode nomenclature: some opcodes have a format like
* SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
@@ -61,24 +78,24 @@
#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
/* Status Register bits. */
-#define SR_WIP 1 /* Write in progress */
-#define SR_WEL 2 /* Write enable latch */
+#define SR_WIP BIT(0) /* Write in progress */
+#define SR_WEL BIT(1) /* Write enable latch */
/* meaning of other SR_* bits may differ between vendors */
-#define SR_BP0 4 /* Block protect 0 */
-#define SR_BP1 8 /* Block protect 1 */
-#define SR_BP2 0x10 /* Block protect 2 */
-#define SR_SRWD 0x80 /* SR write protect */
+#define SR_BP0 BIT(2) /* Block protect 0 */
+#define SR_BP1 BIT(3) /* Block protect 1 */
+#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_SRWD BIT(7) /* SR write protect */
-#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
+#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
/* Enhanced Volatile Configuration Register bits */
-#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */
+#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
/* Flag Status Register bits */
-#define FSR_READY 0x80
+#define FSR_READY BIT(7)
/* Configuration Register bits. */
-#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
+#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
enum read_mode {
SPI_NOR_NORMAL = 0,
@@ -87,33 +104,6 @@ enum read_mode {
SPI_NOR_QUAD,
};
-/**
- * struct spi_nor_xfer_cfg - Structure for defining a Serial Flash transfer
- * @wren: command for "Write Enable", or 0x00 for not required
- * @cmd: command for operation
- * @cmd_pins: number of pins to send @cmd (1, 2, 4)
- * @addr: address for operation
- * @addr_pins: number of pins to send @addr (1, 2, 4)
- * @addr_width: number of address bytes
- * (3,4, or 0 for address not required)
- * @mode: mode data
- * @mode_pins: number of pins to send @mode (1, 2, 4)
- * @mode_cycles: number of mode cycles (0 for mode not required)
- * @dummy_cycles: number of dummy cycles (0 for dummy not required)
- */
-struct spi_nor_xfer_cfg {
- u8 wren;
- u8 cmd;
- u8 cmd_pins;
- u32 addr;
- u8 addr_pins;
- u8 addr_width;
- u8 mode;
- u8 mode_pins;
- u8 mode_cycles;
- u8 dummy_cycles;
-};
-
#define SPI_NOR_MAX_CMD_SIZE 8
enum spi_nor_ops {
SPI_NOR_OPS_READ = 0,
@@ -127,11 +117,14 @@ enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
};
+struct mtd_info;
+
/**
* struct spi_nor - Structure for defining the SPI NOR layer
* @mtd: an embedded mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
* @dev: points to a spi device, or a spi nor controller device.
+ * @flash_node: points to a device node describing this flash instance.
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -141,28 +134,28 @@ enum spi_nor_option_flags {
* @flash_read: the mode of the read
* @sst_write_second: used by the SST write operation
* @flags: flag options for the current SPI-NOR (SNOR_F_*)
- * @cfg: used by the read_xfer/write_xfer
* @cmd_buf: used by the write_reg
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
* @unprepare: [OPTIONAL] do some post work after the
* read/write/erase/lock/unlock operations
- * @read_xfer: [OPTIONAL] the read fundamental primitive
- * @write_xfer: [OPTIONAL] the writefundamental primitive
* @read_reg: [DRIVER-SPECIFIC] read out the register
* @write_reg: [DRIVER-SPECIFIC] write data to the register
* @read: [DRIVER-SPECIFIC] read data from the SPI NOR
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
* at the offset @offs
- * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
- * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
+ * completely locked
* @priv: the private data
*/
struct spi_nor {
- struct mtd_info *mtd;
+ struct mtd_info mtd;
struct mutex lock;
struct device *dev;
+ struct device_node *flash_node;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
@@ -172,18 +165,12 @@ struct spi_nor {
enum read_mode flash_read;
bool sst_write_second;
u32 flags;
- struct spi_nor_xfer_cfg cfg;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- int (*read_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg,
- u8 *buf, size_t len);
- int (*write_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg,
- u8 *buf, size_t len);
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
- int write_enable);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int (*read)(struct spi_nor *nor, loff_t from,
size_t len, size_t *retlen, u_char *read_buf);
@@ -193,6 +180,7 @@ struct spi_nor {
int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
void *priv;
};
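
Since @mtd is now embedded rather than pointed to, registration takes its address; a minimal sketch (function name hypothetical):

static int example_register(struct spi_nor *nor)
{
        /* Formerly mtd_device_register(nor->mtd, ...). */
        return mtd_device_register(&nor->mtd, NULL, 0);
}
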
diff --git a/kernel/include/linux/n_r3964.h b/kernel/include/linux/n_r3964.h
index 5d0b2a1de..90a803aa4 100644
--- a/kernel/include/linux/n_r3964.h
+++ b/kernel/include/linux/n_r3964.h
@@ -152,9 +152,6 @@ struct r3964_info {
unsigned char *rx_buf; /* ring buffer */
unsigned char *tx_buf;
- wait_queue_head_t read_wait;
- //struct wait_queue *read_wait;
-
struct r3964_block_header *rx_first;
struct r3964_block_header *rx_last;
struct r3964_block_header *tx_first;
@@ -164,8 +161,9 @@ struct r3964_info {
unsigned char last_rx;
unsigned char bcc;
unsigned int blocks_in_rx_queue;
-
-
+
+ struct mutex read_lock; /* serialize r3964_read */
+
struct r3964_client_info *firstClient;
unsigned int state;
unsigned int flags;
diff --git a/kernel/include/linux/namei.h b/kernel/include/linux/namei.h
index c8990779f..d8c6334cd 100644
--- a/kernel/include/linux/namei.h
+++ b/kernel/include/linux/namei.h
@@ -1,16 +1,15 @@
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
-#include <linux/dcache.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/path.h>
-
-struct vfsmount;
-struct nameidata;
+#include <linux/fcntl.h>
+#include <linux/errno.h>
enum { MAX_NESTED_LINKS = 8 };
+#define MAXSYMLINKS 40
+
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
-extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
-#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
-#define user_path_dir(name, path) \
- user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
+ struct path *path)
+{
+ return user_path_at_empty(dfd, name, flags, path, NULL);
+}
+
+static inline int user_path(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
+}
+
+static inline int user_lpath(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
+}
+
+static inline int user_path_dir(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
+}
extern int kern_path(const char *, unsigned, struct path *);
@@ -70,9 +85,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct nameidata *nd, struct path *path);
-extern void nd_set_link(struct nameidata *nd, char *path);
-extern char *nd_get_link(struct nameidata *nd);
+extern void nd_jump_link(struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/kernel/include/linux/nd.h b/kernel/include/linux/nd.h
new file mode 100644
index 000000000..507e47c86
--- /dev/null
+++ b/kernel/include/linux/nd.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __LINUX_ND_H__
+#define __LINUX_ND_H__
+#include <linux/fs.h>
+#include <linux/ndctl.h>
+#include <linux/device.h>
+
+struct nd_device_driver {
+ struct device_driver drv;
+ unsigned long type;
+ int (*probe)(struct device *dev);
+ int (*remove)(struct device *dev);
+};
+
+static inline struct nd_device_driver *to_nd_device_driver(
+ struct device_driver *drv)
+{
+ return container_of(drv, struct nd_device_driver, drv);
+};
+
+/**
+ * struct nd_namespace_common - core infrastructure of a namespace
+ * @force_raw: ignore other personalities for the namespace (e.g. btt)
+ * @dev: device model node
+ * @claim: when set, another personality has taken ownership of the namespace
+ * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
+ */
+struct nd_namespace_common {
+ int force_raw;
+ struct device dev;
+ struct device *claim;
+ int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
+ void *buf, size_t size, int rw);
+};
+
+static inline struct nd_namespace_common *to_ndns(struct device *dev)
+{
+ return container_of(dev, struct nd_namespace_common, dev);
+}
+
+/**
+ * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * @dev: namespace device created by the nd region driver
+ * @res: struct resource conversion of a NFIT SPA table
+ */
+struct nd_namespace_io {
+ struct nd_namespace_common common;
+ struct resource res;
+};
+
+/**
+ * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
+ * @nsio: device and system physical address range to drive
+ * @alt_name: namespace name supplied in the dimm label
+ * @uuid: namespace uuid supplied in the dimm label
+ */
+struct nd_namespace_pmem {
+ struct nd_namespace_io nsio;
+ char *alt_name;
+ u8 *uuid;
+};
+
+/**
+ * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
+ * @alt_name: namespace name supplied in the dimm label
+ * @uuid: namespace uuid supplied in the dimm label
+ * @id: ida allocated id
+ * @lbasize: blk namespaces have a native sector size when btt not present
+ * @num_resources: number of dpa extents to claim
+ * @res: discontiguous dpa extents for given dimm
+ */
+struct nd_namespace_blk {
+ struct nd_namespace_common common;
+ char *alt_name;
+ u8 *uuid;
+ int id;
+ unsigned long lbasize;
+ int num_resources;
+ struct resource **res;
+};
+
+static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
+{
+ return container_of(dev, struct nd_namespace_io, common.dev);
+}
+
+static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
+
+ return container_of(nsio, struct nd_namespace_pmem, nsio);
+}
+
+static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev)
+{
+ return container_of(dev, struct nd_namespace_blk, common.dev);
+}
+
+/**
+ * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
+ * @ndns: device to read
+ * @offset: namespace-relative starting offset
+ * @buf: buffer to fill
+ * @size: transfer length
+ *
+ * @buf is up-to-date upon return from this routine.
+ */
+static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size)
+{
+ return ndns->rw_bytes(ndns, offset, buf, size, READ);
+}
+
+/**
+ * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
+ * @ndns: device to read
+ * @offset: namespace-relative starting offset
+ * @buf: buffer to drain
+ * @size: transfer length
+ *
+ * NVDIMM namespace disks do not implement sectors internally. Depending on
+ * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
+ * or on backing memory media upon return from this routine. Flushing
+ * to media is handled internal to the @ndns driver, if at all.
+ */
+static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size)
+{
+ return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
+}
+
+#define MODULE_ALIAS_ND_DEVICE(type) \
+ MODULE_ALIAS("nd:t" __stringify(type) "*")
+#define ND_DEVICE_MODALIAS_FMT "nd:t%d"
+
+int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
+ struct module *module, const char *mod_name);
+#define nd_driver_register(driver) \
+ __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#endif /* __LINUX_ND_H__ */
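
A hedged sketch of a consumer of the new header: an nd driver probing a namespace device and issuing a byte-granular read (all names hypothetical, error handling abbreviated):

static int example_nd_probe(struct device *dev)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        u8 sb[64];

        /* Goes through the namespace's rw_bytes() hook. */
        return nvdimm_read_bytes(ndns, 0, sb, sizeof(sb));
}

static struct nd_device_driver example_nd_driver = {
        .probe = example_nd_probe,
        .drv = { .name = "example_nd" },
};

static int __init example_init(void)
{
        return nd_driver_register(&example_nd_driver);
}
module_init(example_init);
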
diff --git a/kernel/include/linux/net.h b/kernel/include/linux/net.h
index 738ea48be..0b4ac7da5 100644
--- a/kernel/include/linux/net.h
+++ b/kernel/include/linux/net.h
@@ -24,7 +24,8 @@
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/once.h>
+
#include <uapi/linux/net.h>
struct poll_table_struct;
@@ -33,12 +34,15 @@ struct inode;
struct file;
struct net;
-#define SOCK_ASYNC_NOSPACE 0
-#define SOCK_ASYNC_WAITDATA 1
+/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
+ * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
+ * Eventually all flags will be in sk->sk_wq_flags.
+ */
+#define SOCKWQ_ASYNC_NOSPACE 0
+#define SOCKWQ_ASYNC_WAITDATA 1
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
-#define SOCK_EXTERNALLY_ALLOCATED 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -89,6 +93,7 @@ struct socket_wq {
/* Note: wait MUST be first field of socket_wq */
wait_queue_head_t wait;
struct fasync_struct *fasync_list;
+ unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
struct rcu_head rcu;
} ____cacheline_aligned_in_smp;
@@ -96,7 +101,7 @@ struct socket_wq {
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
* @type: socket type (%SOCK_STREAM, etc)
- * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ * @flags: socket flags (%SOCK_NOSPACE, etc)
* @ops: protocol specific socket operations
* @file: File back pointer for gc
* @sk: internal networking protocol agnostic socket representation
@@ -202,13 +207,13 @@ enum {
SOCK_WAKE_URG,
};
-int sock_wake_async(struct socket *sk, int how, int band);
+int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
-int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
@@ -240,25 +245,19 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...) \
+ do { \
+ if (0) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+#endif
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
- struct static_key *done_key);
-
-#define net_get_random_once(buf, nbytes) \
- ({ \
- bool ___ret = false; \
- static bool ___done = false; \
- static struct static_key ___once_key = \
- STATIC_KEY_INIT_TRUE; \
- if (static_key_true(&___once_key)) \
- ___ret = __net_get_random_once(buf, \
- nbytes, \
- &___done, \
- &___once_key); \
- ___ret; \
- })
+#define net_get_random_once(buf, nbytes) \
+ get_random_once((buf), (nbytes))
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
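
net_get_random_once() is now a thin alias for get_random_once(); usage is unchanged, e.g. seeding a per-boot hash secret. A sketch, assuming <linux/jhash.h> and hypothetical names:

static u32 example_secret;

static u32 example_hash(const void *data, u32 len)
{
        /* First call seeds the secret; later calls are near-free no-ops. */
        net_get_random_once(&example_secret, sizeof(example_secret));
        return jhash(data, len, example_secret);
}
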
diff --git a/kernel/include/linux/netdev_features.h b/kernel/include/linux/netdev_features.h
index 7d59dc6ab..f0d87347d 100644
--- a/kernel/include/linux/netdev_features.h
+++ b/kernel/include/linux/netdev_features.h
@@ -66,7 +66,6 @@ enum {
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
- NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
/*
* Add your fresh new feature above and remember to update
@@ -125,7 +124,9 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
-#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
+
+#define for_each_netdev_feature(mask_addr, bit) \
+ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -161,8 +162,7 @@ enum {
*/
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_HW_SWITCH_OFFLOAD)
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
/*
* If one device doesn't support one of these features, then disable it
@@ -170,6 +170,12 @@ enum {
*/
#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
+/*
+ * If upper/master device has these features disabled, they must be disabled
+ * on all lower/slave devices as well.
+ */
+#define NETIF_F_UPPER_DISABLES NETIF_F_LRO
+
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
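
A sketch of the new for_each_netdev_feature() iterator; the logging is hypothetical:

static void example_dump_features(netdev_features_t features)
{
        int bit;

        for_each_netdev_feature(&features, bit)
                pr_debug("feature bit %d set\n", bit);
}
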
diff --git a/kernel/include/linux/netdevice.h b/kernel/include/linux/netdevice.h
index 7a289e802..c0e12f7f0 100644
--- a/kernel/include/linux/netdevice.h
+++ b/kernel/include/linux/netdevice.h
@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n)
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
+ clear_bit(NAPI_STATE_NPSVC, &n->state);
}
#ifdef CONFIG_SMP
@@ -717,8 +718,8 @@ struct xps_map {
u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
-#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
- / sizeof(u16))
+#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
+ - sizeof(struct xps_map)) / sizeof(u16))
/*
* This structure holds all XPS maps for device. Maps are indexed by CPU.
@@ -766,6 +767,13 @@ struct netdev_phys_item_id {
unsigned char id_len;
};
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+ struct netdev_phys_item_id *b)
+{
+ return a->id_len == b->id_len &&
+ memcmp(a->id, b->id, a->id_len) == 0;
+}
+
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
@@ -873,6 +881,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
+ * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
@@ -1041,6 +1050,16 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ * bool proto_down);
+ * This function is used to pass protocol port error state information
+ * to the switch driver. The switch driver can react to the proto_down
+ * by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
+ * This function is used to get egress tunnel information for the given skb.
+ * This is useful for retrieving outer tunnel header parameters while
+ * sampling packets.
+ *
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1095,11 +1114,17 @@ struct net_device_ops {
int max_tx_rate);
int (*ndo_set_vf_spoofchk)(struct net_device *dev,
int vf, bool setting);
+ int (*ndo_set_vf_trust)(struct net_device *dev,
+ int vf, bool setting);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
int (*ndo_set_vf_link_state)(struct net_device *dev,
int vf, int link_state);
+ int (*ndo_get_vf_stats)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_stats
+ *vf_stats);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);
@@ -1207,6 +1232,10 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
+ int (*ndo_change_proto_down)(struct net_device *dev,
+ bool proto_down);
+ int (*ndo_fill_metadata_dst)(struct net_device *dev,
+ struct sk_buff *skb);
};
/**
@@ -1221,13 +1250,8 @@ struct net_device_ops {
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
- * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
- * @IFF_MASTER_8023AD: bonding master, 802.3ad
- * @IFF_MASTER_ALB: bonding master, balance-alb
* @IFF_BONDING: bonding master or slave
- * @IFF_SLAVE_NEEDARP: need ARPs for validation
* @IFF_ISATAP: ISATAP interface (RFC4214)
- * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
* @IFF_WAN_HDLC: WAN HDLC device
* @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
* release skb->dst
@@ -1243,44 +1267,42 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_L3MDEV_MASTER: device is an L3 master device
+ * @IFF_NO_QUEUE: device can run without qdisc attached
+ * @IFF_OPENVSWITCH: device is an Open vSwitch master
+ * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_EBRIDGE = 1<<1,
- IFF_SLAVE_INACTIVE = 1<<2,
- IFF_MASTER_8023AD = 1<<3,
- IFF_MASTER_ALB = 1<<4,
- IFF_BONDING = 1<<5,
- IFF_SLAVE_NEEDARP = 1<<6,
- IFF_ISATAP = 1<<7,
- IFF_MASTER_ARPMON = 1<<8,
- IFF_WAN_HDLC = 1<<9,
- IFF_XMIT_DST_RELEASE = 1<<10,
- IFF_DONT_BRIDGE = 1<<11,
- IFF_DISABLE_NETPOLL = 1<<12,
- IFF_MACVLAN_PORT = 1<<13,
- IFF_BRIDGE_PORT = 1<<14,
- IFF_OVS_DATAPATH = 1<<15,
- IFF_TX_SKB_SHARING = 1<<16,
- IFF_UNICAST_FLT = 1<<17,
- IFF_TEAM_PORT = 1<<18,
- IFF_SUPP_NOFCS = 1<<19,
- IFF_LIVE_ADDR_CHANGE = 1<<20,
- IFF_MACVLAN = 1<<21,
- IFF_XMIT_DST_RELEASE_PERM = 1<<22,
- IFF_IPVLAN_MASTER = 1<<23,
- IFF_IPVLAN_SLAVE = 1<<24,
+ IFF_BONDING = 1<<2,
+ IFF_ISATAP = 1<<3,
+ IFF_WAN_HDLC = 1<<4,
+ IFF_XMIT_DST_RELEASE = 1<<5,
+ IFF_DONT_BRIDGE = 1<<6,
+ IFF_DISABLE_NETPOLL = 1<<7,
+ IFF_MACVLAN_PORT = 1<<8,
+ IFF_BRIDGE_PORT = 1<<9,
+ IFF_OVS_DATAPATH = 1<<10,
+ IFF_TX_SKB_SHARING = 1<<11,
+ IFF_UNICAST_FLT = 1<<12,
+ IFF_TEAM_PORT = 1<<13,
+ IFF_SUPP_NOFCS = 1<<14,
+ IFF_LIVE_ADDR_CHANGE = 1<<15,
+ IFF_MACVLAN = 1<<16,
+ IFF_XMIT_DST_RELEASE_PERM = 1<<17,
+ IFF_IPVLAN_MASTER = 1<<18,
+ IFF_IPVLAN_SLAVE = 1<<19,
+ IFF_L3MDEV_MASTER = 1<<20,
+ IFF_NO_QUEUE = 1<<21,
+ IFF_OPENVSWITCH = 1<<22,
+ IFF_L3MDEV_SLAVE = 1<<23,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
-#define IFF_MASTER_8023AD IFF_MASTER_8023AD
-#define IFF_MASTER_ALB IFF_MASTER_ALB
#define IFF_BONDING IFF_BONDING
-#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
#define IFF_ISATAP IFF_ISATAP
-#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
#define IFF_WAN_HDLC IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
@@ -1297,6 +1319,10 @@ enum netdev_priv_flags {
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
+#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
+#define IFF_NO_QUEUE IFF_NO_QUEUE
+#define IFF_OPENVSWITCH IFF_OPENVSWITCH
+#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
/**
* struct net_device - The DEVICE structure.
@@ -1372,7 +1398,8 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
- * @hard_header_len: Hardware header length
+ * @hard_header_len: Hardware header length, i.e. the minimum size of
+ * a packet.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1444,6 +1471,8 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
+ * @offload_fwd_mark: Offload device forwarding mark
+ *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1498,6 +1527,10 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
+ * @proto_down: protocol port state information can be sent to the
+ * switch driver and used to set the phys state of the
+ * switch port.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1564,7 +1597,10 @@ struct net_device {
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
- const struct swdev_ops *swdev_ops;
+ const struct switchdev_ops *switchdev_ops;
+#endif
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ const struct l3mdev_ops *l3mdev_ops;
#endif
const struct header_ops *header_ops;
@@ -1652,7 +1688,14 @@ struct net_device {
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto __rcu *ingress_cl_list;
+#endif
struct netdev_queue __rcu *ingress_queue;
+#ifdef CONFIG_NETFILTER_INGRESS
+ struct list_head nf_hooks_ingress;
+#endif
+
unsigned char broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rx_cpu_rmap;
@@ -1674,6 +1717,10 @@ struct net_device {
struct xps_dev_maps __rcu *xps_maps;
#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ u32 offload_fwd_mark;
+#endif
+
/* These may be needed for future network-power-down code. */
/*
@@ -1751,6 +1798,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1990,6 +2038,7 @@ struct offload_callbacks {
struct packet_offload {
__be16 type; /* This is really htons(ether_type). */
+ u16 priority;
struct offload_callbacks callbacks;
struct list_head list;
};
@@ -2020,20 +2069,23 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
-#define netdev_alloc_pcpu_stats(type) \
-({ \
- typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
- if (pcpu_stats) { \
- int __cpu; \
- for_each_possible_cpu(__cpu) { \
- typeof(type) *stat; \
- stat = per_cpu_ptr(pcpu_stats, __cpu); \
- u64_stats_init(&stat->syncp); \
- } \
- } \
- pcpu_stats; \
+#define __netdev_alloc_pcpu_stats(type, gfp) \
+({ \
+ typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
})
+#define netdev_alloc_pcpu_stats(type) \
+ __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+
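[Editor's note: a minimal usage sketch for the reworked allocator above; the driver setup context is assumed. The new __netdev_alloc_pcpu_stats() lets callers pick the allocation flags, while netdev_alloc_pcpu_stats() keeps the old GFP_KERNEL behaviour.]

/* Sketch: per-CPU stats allocation in a driver's setup path. */
struct pcpu_sw_netstats __percpu *stats;

stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!stats)
	return -ENOMEM;
/* In atomic context, pass the gfp mask explicitly instead: */
stats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats, GFP_ATOMIC);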
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
@@ -2068,6 +2120,7 @@ struct pcpu_sw_netstats {
#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA 0x0018
#define NETDEV_BONDING_INFO 0x0019
+#define NETDEV_PRECHANGEUPPER 0x001A
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2081,6 +2134,13 @@ struct netdev_notifier_change_info {
unsigned int flags_changed;
};
+struct netdev_notifier_changeupper_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct net_device *upper_dev; /* new upper dev */
+ bool master; /* is upper dev master */
+ bool linking; /* is the notification for link or unlink */
+};
+
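[Editor's note: a hedged sketch of a consumer of the new changeupper info struct; the handler name my_netdev_event is hypothetical. With NETDEV_PRECHANGEUPPER added above, the same payload is also delivered before the link is committed.]

/* Sketch: inspect (un)linking of an upper device in a notifier. */
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;
	pr_debug("%s: %slinked %s upper %s\n", dev->name,
		 info->linking ? "" : "un",
		 info->master ? "master" : "non-master",
		 info->upper_dev->name);
	return NOTIFY_OK;
}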
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2161,6 +2221,7 @@ void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2171,12 +2232,8 @@ int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
-int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
-int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
-static inline int dev_queue_xmit(struct sk_buff *skb)
-{
- return dev_queue_xmit_sk(skb->sk, skb);
-}
+int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2192,11 +2249,20 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+ return current->xmit_recursion;
+}
+
+#else
+
DECLARE_PER_CPU(int, xmit_recursion);
static inline int dev_recursion_level(void)
{
return this_cpu_read(xmit_recursion);
}
+#endif
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
@@ -2265,8 +2331,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
- skb_gro_offset(skb));
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2362,37 +2427,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
grc->delta = 0;
}
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
{
__wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start =
- ((unsigned char *)ptr + start) - skb->head;
- return;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, off + plen)) {
+ ptr = skb_gro_header_slow(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
}
- delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
/* Adjust skb->csum since we changed the packet */
NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
- grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->offset = off + hdrlen + offset;
grc->delta = delta;
+
+ return ptr;
}
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
struct gro_remcsum *grc)
{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
if (!grc->delta)
return;
- remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+ ptr = skb_gro_header_fast(skb, grc->offset);
+ if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+ ptr = skb_gro_header_slow(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+ }
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2553,10 +2639,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
- if (WARN_ON(!dev_queue)) {
- pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
- return;
- }
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2572,15 +2654,7 @@ static inline void netif_stop_queue(struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
-static inline void netif_tx_stop_all_queues(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- netif_tx_stop_queue(txq);
- }
-}
+void netif_tx_stop_all_queues(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -2841,6 +2915,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
}
#endif
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+ unsigned int num_tx_queues);
+
/*
* Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
* as a distribution range limit for the returned value.
@@ -2938,11 +3015,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
-int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
-static inline int netif_receive_skb(struct sk_buff *skb)
-{
- return netif_receive_skb_sk(skb->sk, skb);
-}
+int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -2980,6 +3053,7 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
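[Editor's note: a sketch of driving the new proto_down plumbing; the trigger condition is illustrative. dev_change_proto_down() ends up in the device's ndo_change_proto_down and is expected to run under RTNL.]

/* Sketch: report a protocol-level port fault to the switch driver. */
rtnl_lock();
if (dev_change_proto_down(dev, true) == -EOPNOTSUPP)
	pr_debug("%s: driver has no ndo_change_proto_down\n", dev->name);
rtnl_unlock();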
@@ -3779,6 +3853,31 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+static inline bool netif_is_l3_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_L3MDEV_MASTER;
+}
+
+static inline bool netif_is_l3_slave(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_L3MDEV_SLAVE;
+}
+
+static inline bool netif_is_bridge_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_EBRIDGE;
+}
+
+static inline bool netif_is_bridge_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_BRIDGE_PORT;
+}
+
+static inline bool netif_is_ovs_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OPENVSWITCH;
+}
+
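[Editor's note: the new predicates simply test the priv_flags bits defined earlier in this diff; the composite check below is a made-up example.]

/* Sketch: is this device the master side of some aggregate? */
static bool dev_is_aggregate_master(const struct net_device *dev)
{
	return netif_is_l3_master(dev) || netif_is_bridge_master(dev) ||
	       netif_is_ovs_master(dev);
}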
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
diff --git a/kernel/include/linux/netfilter.h b/kernel/include/linux/netfilter.h
index 63560d0a8..0ad556726 100644
--- a/kernel/include/linux/netfilter.h
+++ b/kernel/include/linux/netfilter.h
@@ -10,7 +10,10 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
-#include <uapi/linux/netfilter.h>
+#include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
@@ -38,9 +41,6 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
int netfilter_init(void);
-/* Largest hook number + 1 */
-#define NF_MAX_HOOKS 8
-
struct sk_buff;
struct nf_hook_ops;
@@ -54,16 +54,20 @@ struct nf_hook_state {
struct net_device *in;
struct net_device *out;
struct sock *sk;
- int (*okfn)(struct sock *, struct sk_buff *);
+ struct net *net;
+ struct list_head *hook_list;
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
static inline void nf_hook_state_init(struct nf_hook_state *p,
+ struct list_head *hook_list,
unsigned int hook,
int thresh, u_int8_t pf,
struct net_device *indev,
struct net_device *outdev,
struct sock *sk,
- int (*okfn)(struct sock *, struct sk_buff *))
+ struct net *net,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
p->hook = hook;
p->thresh = thresh;
@@ -71,24 +75,26 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->in = indev;
p->out = outdev;
p->sk = sk;
+ p->net = net;
+ p->hook_list = hook_list;
p->okfn = okfn;
}
-typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
+typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
+ struct list_head list;
/* User fills in from here down. */
- nf_hookfn *hook;
- struct module *owner;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
+ nf_hookfn *hook;
+ struct net_device *dev;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
- int priority;
+ int priority;
};
struct nf_sockopt_ops {
@@ -116,6 +122,13 @@ struct nf_sockopt_ops {
};
/* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+
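[Editor's note: a hedged sketch of the new per-namespace registration API; the hook body, priority and pernet wiring are illustrative assumptions. Note the hook function uses the new nf_hookfn signature with void *priv from this same diff.]

/* Sketch: one IPv4 LOCAL_IN hook registered per network namespace. */
static unsigned int my_hook(void *priv, struct sk_buff *skb,
			    const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static const struct nf_hook_ops my_ops = {
	.hook		= my_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FILTER,
};

static int __net_init my_net_init(struct net *net)
{
	return nf_register_net_hook(net, &my_ops);
}

static void __net_exit my_net_exit(struct net *net)
{
	nf_unregister_net_hook(net, &my_ops);
}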
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -126,23 +139,23 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *hook_list,
+ u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(hook_list);
}
#else
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *hook_list,
+ u_int8_t pf, unsigned int hook)
{
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(hook_list);
}
#endif
@@ -150,35 +163,38 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
* nf_hook_thresh - call a netfilter hook
- *
+ *
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct net *net,
struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sock *, struct sk_buff *),
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- if (nf_hooks_active(pf, hook)) {
+ struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+ if (nf_hook_list_active(hook_list, pf, hook)) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook, thresh, pf,
- indev, outdev, sk, okfn);
+ nf_hook_state_init(&state, hook_list, hook, thresh,
+ pf, indev, outdev, sk, net, okfn);
return nf_hook_slow(skb, &state);
}
return 1;
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
- struct sk_buff *skb, struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct sock *, struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
+ struct sock *sk, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
+ return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
}
/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -199,36 +215,38 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
*/
static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in,
struct net_device *out,
- int (*okfn)(struct sock *, struct sk_buff *), int thresh)
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+ int thresh)
{
- int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
+ int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
if (ret == 1)
- ret = okfn(sk, skb);
+ ret = okfn(net, sk, skb);
return ret;
}
static inline int
-NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in, struct net_device *out,
- int (*okfn)(struct sock *, struct sk_buff *), bool cond)
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+ bool cond)
{
int ret;
if (!cond ||
- ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
- ret = okfn(sk, skb);
+ ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ret = okfn(net, sk, skb);
return ret;
}
static inline int
-NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
+NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
- int (*okfn)(struct sock *, struct sk_buff *))
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
+ return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
}
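[Editor's note: callers now thread struct net through explicitly; below is a sketch of an okfn and its NF_HOOK call site under the new prototypes. Function names are hypothetical and the actual transmission is elided.]

/* Sketch: okfn and NF_HOOK call site with the net argument. */
static int my_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);	/* real code would transmit here */
	return 0;
}

static int my_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb,
		       NULL, skb->dev, my_finish);
}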
/* Call setsockopt() */
@@ -264,7 +282,7 @@ struct nf_afinfo {
struct flowi *fl, bool strict);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
- int (*reroute)(struct sk_buff *skb,
+ int (*reroute)(struct net *net, struct sk_buff *skb,
const struct nf_queue_entry *entry);
int route_key_size;
};
@@ -328,21 +346,27 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
}
#else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
-#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
-static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
- struct sock *sk,
- struct sk_buff *skb,
- struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
+static inline int
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+ bool cond)
+{
+ return okfn(net, sk, skb);
+}
+
+static inline int
+NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- return okfn(sk, skb);
+ return okfn(net, sk, skb);
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
- struct sk_buff *skb, struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct sock *, struct sk_buff *))
+
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
+ struct sock *sk, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
return 1;
}
@@ -354,26 +378,43 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+#else
+static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+#endif
struct nf_conn;
enum ip_conntrack_info;
struct nlattr;
-struct nfq_ct_hook {
+struct nfnl_ct_hook {
+ struct nf_conn *(*get_ct)(const struct sk_buff *skb,
+ enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
- int (*build)(struct sk_buff *skb, struct nf_conn *ct);
+ int (*build)(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ u_int16_t ct_attr, u_int16_t ct_info_attr);
int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
u32 portid, u32 report);
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfq_ct_hook __rcu *nfq_ct_hook;
-#else
-static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
-#endif
+extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by the xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
#endif /*__LINUX_NETFILTER_H*/
diff --git a/kernel/include/linux/netfilter/ipset/ip_set.h b/kernel/include/linux/netfilter/ipset/ip_set.h
index 34b172301..0e1f433cc 100644
--- a/kernel/include/linux/netfilter/ipset/ip_set.h
+++ b/kernel/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@ struct ip_set_counter {
atomic64_t packets;
};
+struct ip_set_comment_rcu {
+ struct rcu_head rcu;
+ char str[0];
+};
+
struct ip_set_comment {
- char *str;
+ struct ip_set_comment_rcu __rcu *c;
};
struct ip_set_skbinfo {
@@ -122,13 +127,13 @@ struct ip_set_skbinfo {
struct ip_set;
#define ext_timeout(e, s) \
-(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
-(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s) \
-(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s) \
-(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
+((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
@@ -176,6 +181,9 @@ struct ip_set_type_variant {
/* List elements */
int (*list)(const struct ip_set *set, struct sk_buff *skb,
struct netlink_callback *cb);
+ /* Keep listing private when a resize runs in parallel */
+ void (*uref)(struct ip_set *set, struct netlink_callback *cb,
+ bool start);
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
@@ -223,7 +231,7 @@ struct ip_set {
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
- rwlock_t lock;
+ spinlock_t lock;
/* References to the set */
u32 ref;
/* The core set type */
@@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
cpu_to_be64((u64)skbinfo->skbmark << 32 |
skbinfo->skbmarkmask))) ||
(skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
(skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
cpu_to_be16(skbinfo->skbqueue)));
-
}
static inline void
@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,
/* Netlink CB args */
enum {
- IPSET_CB_NET = 0,
- IPSET_CB_DUMP,
- IPSET_CB_INDEX,
- IPSET_CB_ARG0,
+ IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_DUMP, /* dump single set/all sets */
+ IPSET_CB_INDEX, /* set index */
+ IPSET_CB_PRIVATE, /* set private data */
+ IPSET_CB_ARG0, /* type specific */
IPSET_CB_ARG1,
- IPSET_CB_ARG2,
};
/* register and unregister set references */
@@ -414,7 +421,7 @@ extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
- size_t len);
+ size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
@@ -533,29 +540,9 @@ bitmap_bytes(u32 a, u32 b)
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
-static inline int
+int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
- const void *e, bool active)
-{
- if (SET_WITH_TIMEOUT(set)) {
- unsigned long *timeout = ext_timeout(e, set);
-
- if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(active ? ip_set_timeout_get(timeout)
- : *timeout)))
- return -EMSGSIZE;
- }
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_COMMENT(set) &&
- ip_set_put_comment(skb, ext_comment(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_SKBINFO(set) &&
- ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
- return -EMSGSIZE;
- return 0;
-}
+ const void *e, bool active);
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
@@ -565,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
.timeout = (set)->timeout }
-#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
-
#define IPSET_CONCAT(a, b) a##b
#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
diff --git a/kernel/include/linux/netfilter/ipset/ip_set_comment.h b/kernel/include/linux/netfilter/ipset/ip_set_comment.h
index 21217ea00..8d0248525 100644
--- a/kernel/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/kernel/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
return nla_data(tb);
}
+/* Called from uadd only, protected by the set spinlock.
+ * The kadt functions don't use the comment extensions in any way.
+ */
static inline void
ip_set_init_comment(struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
+ struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
- if (unlikely(comment->str)) {
- kfree(comment->str);
- comment->str = NULL;
+ if (unlikely(c)) {
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
}
if (!len)
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
- comment->str = kzalloc(len + 1, GFP_ATOMIC);
- if (unlikely(!comment->str))
+ c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+ if (unlikely(!c))
return;
- strlcpy(comment->str, ext->comment, len + 1);
+ strlcpy(c->str, ext->comment, len + 1);
+ rcu_assign_pointer(comment->c, c);
}
+/* Used only when dumping a set, protected by rcu_read_lock_bh() */
static inline int
ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
{
- if (!comment->str)
+ struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+
+ if (!c)
return 0;
- return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}
+/* Called from uadd/udel, flush or the garbage collectors, all
+ * protected by the set spinlock.
+ * Also called when the set is destroyed, when there can't be any user
+ * of the set data anymore.
+ */
static inline void
ip_set_comment_free(struct ip_set_comment *comment)
{
- if (unlikely(!comment->str))
+ struct ip_set_comment_rcu *c;
+
+ c = rcu_dereference_protected(comment->c, 1);
+ if (unlikely(!c))
return;
- kfree(comment->str);
- comment->str = NULL;
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
}
#endif
diff --git a/kernel/include/linux/netfilter/ipset/ip_set_timeout.h b/kernel/include/linux/netfilter/ipset/ip_set_timeout.h
index 83c2f9e08..1d6a935c1 100644
--- a/kernel/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/kernel/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
}
static inline bool
-ip_set_timeout_test(unsigned long timeout)
+ip_set_timeout_expired(unsigned long *t)
{
- return timeout == IPSET_ELEM_PERMANENT ||
- time_is_after_jiffies(timeout);
-}
-
-static inline bool
-ip_set_timeout_expired(unsigned long *timeout)
-{
- return *timeout != IPSET_ELEM_PERMANENT &&
- time_is_before_jiffies(*timeout);
+ return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}
static inline void
-ip_set_timeout_set(unsigned long *timeout, u32 t)
+ip_set_timeout_set(unsigned long *timeout, u32 value)
{
- if (!t) {
+ unsigned long t;
+
+ if (!value) {
*timeout = IPSET_ELEM_PERMANENT;
return;
}
- *timeout = msecs_to_jiffies(t * 1000) + jiffies;
- if (*timeout == IPSET_ELEM_PERMANENT)
+ t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
/* Bingo! :-) */
- (*timeout)--;
+ t--;
+ *timeout = t;
}
static inline u32
ip_set_timeout_get(unsigned long *timeout)
{
return *timeout == IPSET_ELEM_PERMANENT ? 0 :
- jiffies_to_msecs(*timeout - jiffies)/1000;
+ jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
}
#endif /* __KERNEL__ */
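[Editor's note: a short sketch of the timeout helpers above. A value of 0 maps to IPSET_ELEM_PERMANENT, and a computed deadline that happens to equal that sentinel is nudged by the t-- adjustment.]

/* Sketch: set a 30 second timeout on an element, then inspect it. */
unsigned long timeout;

ip_set_timeout_set(&timeout, 30);	/* 0 would mean "permanent" */
if (ip_set_timeout_expired(&timeout))
	pr_debug("element already expired\n");
else
	pr_debug("expires in ~%u s\n", ip_set_timeout_get(&timeout));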
diff --git a/kernel/include/linux/netfilter/nf_conntrack_zones_common.h b/kernel/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 000000000..5d7cf36d4
--- /dev/null
+++ b/kernel/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
+#ifndef _NF_CONNTRACK_ZONES_COMMON_H
+#define _NF_CONNTRACK_ZONES_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_CT_DEFAULT_ZONE_ID 0
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK 1
+
+struct nf_conntrack_zone {
+ u16 id;
+ u8 flags;
+ u8 dir;
+};
+
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
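[Editor's note: a small sketch of a zone built from the new common definitions; the id value is an arbitrary example.]

/* Sketch: a conntrack zone that applies to the original direction only. */
static const struct nf_conntrack_zone my_zone = {
	.id	= 1,
	.dir	= NF_CT_ZONE_DIR_ORIG,
};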
diff --git a/kernel/include/linux/netfilter/nfnetlink.h b/kernel/include/linux/netfilter/nfnetlink.h
index e955d4730..5646b24bf 100644
--- a/kernel/include/linux/netfilter/nfnetlink.h
+++ b/kernel/include/linux/netfilter/nfnetlink.h
@@ -14,7 +14,7 @@ struct nfnl_callback {
int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
- int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+ int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
const struct nla_policy *policy; /* netlink attribute policy */
@@ -45,11 +45,11 @@ int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
-int lockdep_nfnl_is_held(__u8 subsys_id);
+bool lockdep_nfnl_is_held(__u8 subsys_id);
#else
-static inline int lockdep_nfnl_is_held(__u8 subsys_id)
+static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
{
- return 1;
+ return true;
}
#endif /* CONFIG_PROVE_LOCKING */
diff --git a/kernel/include/linux/netfilter/nfnetlink_acct.h b/kernel/include/linux/netfilter/nfnetlink_acct.h
index 6ec975748..80ca889b1 100644
--- a/kernel/include/linux/netfilter/nfnetlink_acct.h
+++ b/kernel/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
#define _NFNL_ACCT_H_
#include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
enum {
NFACCT_NO_QUOTA = -1,
@@ -11,7 +12,7 @@ enum {
struct nf_acct;
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/kernel/include/linux/netfilter/x_tables.h b/kernel/include/linux/netfilter/x_tables.h
index f434fc08c..8d8b410f2 100644
--- a/kernel/include/linux/netfilter/x_tables.h
+++ b/kernel/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
+#include <linux/static_key.h>
#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
@@ -13,6 +14,7 @@
* @target: the target extension
* @matchinfo: per-match data
* @targetinfo: per-target data
+ * @net: network namespace through which the action was invoked
* @in: input netdevice
* @out: output netdevice
* @fragoff: packet is a fragment, this is the data offset
@@ -24,7 +26,6 @@
* Fields written to by extensions:
*
* @hotdrop: drop packet if we had inspection problems
- * Network namespace obtainable using dev_net(in/out)
*/
struct xt_action_param {
union {
@@ -34,6 +35,7 @@ struct xt_action_param {
union {
const void *matchinfo, *targinfo;
};
+ struct net *net;
const struct net_device *in, *out;
int fragoff;
unsigned int thoff;
@@ -63,6 +65,7 @@ struct xt_mtchk_param {
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/**
@@ -93,6 +96,7 @@ struct xt_tgchk_param {
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/* Target destructor parameters */
@@ -221,15 +225,11 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int __percpu *stackptr;
void ***jumpstack;
- /* ipt_entry tables: one per CPU */
- /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
- void *entries[1];
+
+ unsigned char entries[0] __aligned(8);
};
-#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
- + nr_cpu_ids * sizeof(char *))
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -285,6 +285,12 @@ DECLARE_PER_CPU(seqcount_t, xt_recseq);
DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if the current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
/**
* xt_write_recseq_begin - start of a write section
*
@@ -358,6 +364,57 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
+
+/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
+ * real (percpu) counter. On !SMP, it's just the packet count,
+ * so nothing needs to be done there.
+ *
+ * xt_percpu_counter_alloc returns the address of the percpu
+ * counter, or 0 on !SMP. We force an alignment of 16 bytes
+ * so that bytes/packets share a common cache line.
+ *
+ * Hence the caller must use IS_ERR_VALUE to check for an error;
+ * this allows us to return 0 for single-core systems without
+ * forcing callers to deal with SMP vs. NONSMP issues.
+ */
+static inline u64 xt_percpu_counter_alloc(void)
+{
+ if (nr_cpu_ids > 1) {
+ void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
+ sizeof(struct xt_counters));
+
+ if (res == NULL)
+ return (u64) -ENOMEM;
+
+ return (u64) (__force unsigned long) res;
+ }
+
+ return 0;
+}
+static inline void xt_percpu_counter_free(u64 pcnt)
+{
+ if (nr_cpu_ids > 1)
+ free_percpu((void __percpu *) (unsigned long) pcnt);
+}
+
+static inline struct xt_counters *
+xt_get_this_cpu_counter(struct xt_counters *cnt)
+{
+ if (nr_cpu_ids > 1)
+ return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
+
+ return cnt;
+}
+
+static inline struct xt_counters *
+xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+{
+ if (nr_cpu_ids > 1)
+ return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
+
+ return cnt;
+}
+
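[Editor's note: following the contract spelled out in the comment above, a sketch of the alloc/free pairing; e stands in for the ip(6)t_entry being checked.]

/* Sketch: per-rule counter setup and teardown. */
u64 pcnt = xt_percpu_counter_alloc();

if (IS_ERR_VALUE(pcnt))
	return -ENOMEM;		/* 0 on !SMP is a valid value, not an error */
e->counters.pcnt = pcnt;

/* later, on rule destruction: */
xt_percpu_counter_free(e->counters.pcnt);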
struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
diff --git a/kernel/include/linux/netfilter_arp/arp_tables.h b/kernel/include/linux/netfilter_arp/arp_tables.h
index c22a7fb8d..6f074db2f 100644
--- a/kernel/include/linux/netfilter_arp/arp_tables.h
+++ b/kernel/include/linux/netfilter_arp/arp_tables.h
@@ -53,7 +53,6 @@ extern struct xt_table *arpt_register_table(struct net *net,
const struct arpt_replace *repl);
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
- unsigned int hook,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/kernel/include/linux/netfilter_bridge.h b/kernel/include/linux/netfilter_bridge.h
index f2fdb5a52..2ed40c402 100644
--- a/kernel/include/linux/netfilter_bridge.h
+++ b/kernel/include/linux/netfilter_bridge.h
@@ -17,17 +17,7 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_NF_BRIDGE_PREROUTING 0x08
-
-static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
-{
- if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
- return PPPOE_SES_HLEN;
- return 0;
-}
-
-int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
+int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline void br_drop_fake_rtable(struct sk_buff *skb)
{
@@ -70,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return false;
+}
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif
diff --git a/kernel/include/linux/netfilter_bridge/ebtables.h b/kernel/include/linux/netfilter_bridge/ebtables.h
index f1bd3962e..2ea517c7c 100644
--- a/kernel/include/linux/netfilter_bridge/ebtables.h
+++ b/kernel/include/linux/netfilter_bridge/ebtables.h
@@ -6,7 +6,7 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#ifndef __LINUX_BRIDGE_EFF_H
@@ -111,9 +111,9 @@ struct ebt_table {
extern struct ebt_table *ebt_register_table(struct net *net,
const struct ebt_table *table);
extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
-extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- struct ebt_table *table);
+extern unsigned int ebt_do_table(struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ struct ebt_table *table);
/* Used in the kernel match() functions */
#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
diff --git a/kernel/include/linux/netfilter_defs.h b/kernel/include/linux/netfilter_defs.h
new file mode 100644
index 000000000..d3a7f8597
--- /dev/null
+++ b/kernel/include/linux/netfilter_defs.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_NETFILTER_CORE_H_
+#define __LINUX_NETFILTER_CORE_H_
+
+#include <uapi/linux/netfilter.h>
+
+/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
+#define NF_MAX_HOOKS 8
+
+#endif
diff --git a/kernel/include/linux/netfilter_ingress.h b/kernel/include/linux/netfilter_ingress.h
new file mode 100644
index 000000000..5fcd375ef
--- /dev/null
+++ b/kernel/include/linux/netfilter_ingress.h
@@ -0,0 +1,44 @@
+#ifndef _NETFILTER_INGRESS_H_
+#define _NETFILTER_INGRESS_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef HAVE_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return !list_empty(&skb->dev->nf_hooks_ingress);
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
+ NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
+ skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
+ return nf_hook_slow(skb, &state);
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev)
+{
+ INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+}
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev) {}
+#endif /* CONFIG_NETFILTER_INGRESS */
+#endif /* _NETFILTER_INGRESS_H_ */
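[Editor's note: a hedged sketch of how the RX core consults these helpers; this is a simplified stand-in for the real dispatch logic, not the actual core code.]

/* Sketch: run ingress hooks before normal protocol delivery. */
static int do_ingress(struct sk_buff *skb)
{
	if (nf_hook_ingress_active(skb))
		return nf_hook_ingress(skb);	/* <= 0: skb consumed */
	return 1;	/* continue normal delivery */
}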
diff --git a/kernel/include/linux/netfilter_ipv4.h b/kernel/include/linux/netfilter_ipv4.h
index 6e4591bb5..98c03b246 100644
--- a/kernel/include/linux/netfilter_ipv4.h
+++ b/kernel/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
#include <uapi/linux/netfilter_ipv4.h>
-int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/kernel/include/linux/netfilter_ipv4/ip_tables.h b/kernel/include/linux/netfilter_ipv4/ip_tables.h
index 4073510da..aa598f942 100644
--- a/kernel/include/linux/netfilter_ipv4/ip_tables.h
+++ b/kernel/include/linux/netfilter_ipv4/ip_tables.h
@@ -64,7 +64,6 @@ struct ipt_error {
extern void *ipt_alloc_initial_table(const struct xt_table *);
extern unsigned int ipt_do_table(struct sk_buff *skb,
- unsigned int hook,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/kernel/include/linux/netfilter_ipv6.h b/kernel/include/linux/netfilter_ipv6.h
index 64dad1cc1..47c6b04c2 100644
--- a/kernel/include/linux/netfilter_ipv6.h
+++ b/kernel/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
#include <uapi/linux/netfilter_ipv6.h>
-
-#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
@@ -25,8 +16,19 @@ void ipv6_netfilter_fini(void);
struct nf_ipv6_ops {
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+ void (*route_input)(struct sk_buff *skb);
+ int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
};
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
{
@@ -36,6 +38,7 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
#endif /* CONFIG_NETFILTER */
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/kernel/include/linux/netfilter_ipv6/ip6_tables.h b/kernel/include/linux/netfilter_ipv6/ip6_tables.h
index b40d2b635..0f76e5c67 100644
--- a/kernel/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/kernel/include/linux/netfilter_ipv6/ip6_tables.h
@@ -30,7 +30,6 @@ extern struct xt_table *ip6t_register_table(struct net *net,
const struct ip6t_replace *repl);
extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
- unsigned int hook,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/kernel/include/linux/netlink.h b/kernel/include/linux/netlink.h
index 6835c1279..639e9b8b0 100644
--- a/kernel/include/linux/netlink.h
+++ b/kernel/include/linux/netlink.h
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
__u32 dst_group;
__u32 flags;
struct sock *sk;
+ bool nsid_is_set;
+ int nsid;
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
@@ -66,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
+
+extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask);
+static inline struct sk_buff *
+netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
+ gfp_t gfp_mask)
+{
+ return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
+}
+
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
diff --git a/kernel/include/linux/nfs4.h b/kernel/include/linux/nfs4.h
index 32201c269..e7e78537a 100644
--- a/kernel/include/linux/nfs4.h
+++ b/kernel/include/linux/nfs4.h
@@ -130,6 +130,7 @@ enum nfs_opnum4 {
OP_READ_PLUS = 68,
OP_SEEK = 69,
OP_WRITE_SAME = 70,
+ OP_CLONE = 71,
OP_ILLEGAL = 10044,
};
@@ -421,6 +422,7 @@ enum lock_type4 {
#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
+#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
/* MDS threshold bitmap bits */
@@ -500,6 +502,8 @@ enum {
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
+ NFSPROC4_CLNT_LAYOUTSTATS,
+ NFSPROC4_CLNT_CLONE,
};
/* nfs41 types */
@@ -546,6 +550,24 @@ enum pnfs_notify_deviceid_type4 {
NOTIFY_DEVICEID4_DELETE = 1 << 2,
};
+enum pnfs_block_volume_type {
+ PNFS_BLOCK_VOLUME_SIMPLE = 0,
+ PNFS_BLOCK_VOLUME_SLICE = 1,
+ PNFS_BLOCK_VOLUME_CONCAT = 2,
+ PNFS_BLOCK_VOLUME_STRIPE = 3,
+};
+
+enum pnfs_block_extent_state {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2,
+ PNFS_BLOCK_NONE_DATA = 3,
+};
+
+/* on-the-wire size of a block layout extent */
+#define PNFS_BLOCK_EXTENT_SIZE \
+ (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
+
#define NFL4_UFLG_MASK 0x0000003F
#define NFL4_UFLG_DENSE 0x00000001
#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/kernel/include/linux/nfs_fs.h b/kernel/include/linux/nfs_fs.h
index b95f914ce..5455b660b 100644
--- a/kernel/include/linux/nfs_fs.h
+++ b/kernel/include/linux/nfs_fs.h
@@ -219,6 +219,7 @@ struct nfs_inode {
#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
+#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
@@ -291,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
+ NFS_INO_REVAL_PAGECACHE |
+ NFS_INO_INVALID_ACCESS |
+ NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
}
@@ -349,7 +353,6 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
extern int nfs_permission(struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
-extern int nfs_release(struct inode *, struct file *);
extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
@@ -367,6 +370,7 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
+extern void nfs_file_clear_open_context(struct file *flip);
extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
extern u64 nfs_compat_user_ino64(u64 fileid);
@@ -540,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
static inline loff_t nfs_size_to_loff_t(__u64 size)
{
- if (size > (__u64) OFFSET_MAX - 1)
- return OFFSET_MAX - 1;
- return (loff_t) size;
+ return min_t(u64, size, OFFSET_MAX);
}
static inline ino_t
diff --git a/kernel/include/linux/nfs_fs_sb.h b/kernel/include/linux/nfs_fs_sb.h
index 5e1273d4d..2469ab0bb 100644
--- a/kernel/include/linux/nfs_fs_sb.h
+++ b/kernel/include/linux/nfs_fs_sb.h
@@ -147,6 +147,7 @@ struct nfs_server {
unsigned int acdirmax;
unsigned int namelen;
unsigned int options; /* extra options enabled by mount */
+ unsigned int clone_blksize; /* granularity of a CLONE operation */
#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */
@@ -173,6 +174,11 @@ struct nfs_server {
set of attributes supported
on this filesystem excluding
the label support bit. */
+ u32 exclcreat_bitmask[3];
+ /* V4 bitmask representing the
+ set of attributes supported
+ on this filesystem for the
+ exclusive create. */
u32 cache_consistency_bitmask[3];
/* V4 bitmask representing the subset
of change attribute, size, ctime
@@ -220,7 +226,7 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
-#define NFS_CAP_CHANGE_ATTR (1U << 5)
+/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
#define NFS_CAP_FILEID (1U << 6)
#define NFS_CAP_MODE (1U << 7)
#define NFS_CAP_NLINK (1U << 8)
@@ -237,5 +243,7 @@ struct nfs_server {
#define NFS_CAP_SEEK (1U << 19)
#define NFS_CAP_ALLOCATE (1U << 20)
#define NFS_CAP_DEALLOCATE (1U << 21)
+#define NFS_CAP_LAYOUTSTATS (1U << 22)
+#define NFS_CAP_CLONE (1U << 23)
#endif
diff --git a/kernel/include/linux/nfs_page.h b/kernel/include/linux/nfs_page.h
index 3eb072dbc..f2f650f13 100644
--- a/kernel/include/linux/nfs_page.h
+++ b/kernel/include/linux/nfs_page.h
@@ -67,7 +67,6 @@ struct nfs_rw_ops {
const fmode_t rw_mode;
struct nfs_pgio_header *(*rw_alloc_header)(void);
void (*rw_free_header)(struct nfs_pgio_header *);
- void (*rw_release)(struct nfs_pgio_header *);
int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
struct inode *);
void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
diff --git a/kernel/include/linux/nfs_xdr.h b/kernel/include/linux/nfs_xdr.h
index e9e9a8dcf..11bbae44f 100644
--- a/kernel/include/linux/nfs_xdr.h
+++ b/kernel/include/linux/nfs_xdr.h
@@ -141,6 +141,7 @@ struct nfs_fsinfo {
__u32 lease_time; /* in seconds */
__u32 layouttype; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
+ __u32 clone_blksize; /* granularity of a CLONE operation */
};
struct nfs_fsstat {
@@ -250,6 +251,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
gfp_t gfp_flags;
+ long timeout;
};
struct nfs4_getdeviceinfo_args {
@@ -316,6 +318,68 @@ struct nfs4_layoutreturn {
int rpc_status;
};
+#define PNFS_LAYOUTSTATS_MAXSIZE 256
+
+struct nfs42_layoutstat_args;
+struct nfs42_layoutstat_devinfo;
+typedef void (*layoutstats_encode_t)(struct xdr_stream *,
+ struct nfs42_layoutstat_args *,
+ struct nfs42_layoutstat_devinfo *);
+
+/* Per file per deviceid layoutstats */
+struct nfs42_layoutstat_devinfo {
+ struct nfs4_deviceid dev_id;
+ __u64 offset;
+ __u64 length;
+ __u64 read_count;
+ __u64 read_bytes;
+ __u64 write_count;
+ __u64 write_bytes;
+ __u32 layout_type;
+ layoutstats_encode_t layoutstats_encode;
+ void *layout_private;
+};
+
+struct nfs42_layoutstat_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ struct inode *inode;
+ nfs4_stateid stateid;
+ int num_dev;
+ struct nfs42_layoutstat_devinfo *devinfo;
+};
+
+struct nfs42_layoutstat_res {
+ struct nfs4_sequence_res seq_res;
+ int num_dev;
+ int rpc_status;
+};
+
+struct nfs42_layoutstat_data {
+ struct inode *inode;
+ struct nfs42_layoutstat_args args;
+ struct nfs42_layoutstat_res res;
+};
+
+struct nfs42_clone_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *src_fh;
+ struct nfs_fh *dst_fh;
+ nfs4_stateid src_stateid;
+ nfs4_stateid dst_stateid;
+ __u64 src_offset;
+ __u64 dst_offset;
+ __u64 count;
+ const u32 *dst_bitmask;
+};
+
+struct nfs42_clone_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int rpc_status;
+ struct nfs_fattr *dst_fattr;
+ const struct nfs_server *server;
+};
+
struct stateowner_id {
__u64 create_time;
__u32 uniquifier;
@@ -336,7 +400,7 @@ struct nfs_openargs {
struct stateowner_id id;
union {
struct {
- struct iattr * attrs; /* UNCHECKED, GUARDED */
+ struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */
nfs4_verifier verifier; /* EXCLUSIVE */
};
nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
@@ -346,7 +410,7 @@ struct nfs_openargs {
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
const u32 * open_bitmap;
- __u32 claim;
+ enum open_claim_type4 claim;
enum createmode4 createmode;
const struct nfs4_label *label;
};
@@ -363,8 +427,8 @@ struct nfs_openres {
const struct nfs_server *server;
fmode_t delegation_type;
nfs4_stateid delegation;
+ unsigned long pagemod_limit;
__u32 do_recall;
- __u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
@@ -485,7 +549,7 @@ struct nfs4_delegreturnargs {
struct nfs4_delegreturnres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- const struct nfs_server *server;
+ struct nfs_server *server;
};
/*
@@ -558,7 +622,7 @@ struct nfs_removeargs {
struct nfs_removeres {
struct nfs4_sequence_res seq_res;
- const struct nfs_server *server;
+ struct nfs_server *server;
struct nfs_fattr *dir_attr;
struct nfs4_change_info cinfo;
};
@@ -576,7 +640,7 @@ struct nfs_renameargs {
struct nfs_renameres {
struct nfs4_sequence_res seq_res;
- const struct nfs_server *server;
+ struct nfs_server *server;
struct nfs4_change_info old_cinfo;
struct nfs_fattr *old_fattr;
struct nfs4_change_info new_cinfo;
@@ -642,7 +706,6 @@ struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
size_t acl_len;
- unsigned int acl_pgbase;
struct page ** acl_pages;
};
@@ -654,7 +717,6 @@ struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
size_t acl_len;
- unsigned int acl_pgbase;
struct page ** acl_pages;
};
@@ -984,17 +1046,14 @@ struct nfs4_readlink_res {
struct nfs4_sequence_res seq_res;
};
-#define NFS4_SETCLIENTID_NAMELEN (127)
struct nfs4_setclientid {
const nfs4_verifier * sc_verifier;
- unsigned int sc_name_len;
- char sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
u32 sc_prog;
unsigned int sc_netid_len;
char sc_netid[RPCBIND_MAXNETIDLEN + 1];
unsigned int sc_uaddr_len;
char sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
- u32 sc_cb_ident;
+ struct nfs_client *sc_clnt;
struct rpc_cred *sc_cred;
};
@@ -1017,11 +1076,13 @@ struct nfs4_statfs_res {
struct nfs4_server_caps_arg {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fhandle;
+ const u32 * bitmask;
};
struct nfs4_server_caps_res {
struct nfs4_sequence_res seq_res;
u32 attr_bitmask[3];
+ u32 exclcreat_bitmask[3];
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
@@ -1142,12 +1203,9 @@ struct nfs41_state_protection {
struct nfs4_op_map allow;
};
-#define NFS4_EXCHANGE_ID_LEN (127)
struct nfs41_exchange_id_args {
struct nfs_client *client;
nfs4_verifier *verifier;
- unsigned int id_len;
- char id[NFS4_EXCHANGE_ID_LEN];
u32 flags;
struct nfs41_state_protection state_protect;
};
diff --git a/kernel/include/linux/nmi.h b/kernel/include/linux/nmi.h
index 3d46fb470..7ec5b8673 100644
--- a/kernel/include/linux/nmi.h
+++ b/kernel/include/linux/nmi.h
@@ -27,9 +27,7 @@ static inline void touch_nmi_watchdog(void)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
-static inline void hardlockup_detector_disable(void)
-{
-}
+static inline void hardlockup_detector_disable(void) {}
#endif
/*
@@ -49,6 +47,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
arch_trigger_all_cpu_backtrace(false);
return true;
}
+
+/* generic implementation */
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+ void (*raise)(cpumask_t *mask));
+bool nmi_cpu_backtrace(struct pt_regs *regs);
+
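
An architecture adopts the generic implementation by supplying only its IPI primitive and calling nmi_cpu_backtrace() from the resulting interrupt handler. A sketch, assuming an ARM-style smp_cross_call()/IPI_CPU_BACKTRACE pair:

    static void raise_backtrace_ipi(cpumask_t *mask)
    {
            smp_cross_call(mask, IPI_CPU_BACKTRACE);  /* assumed arch primitive */
    }

    void arch_trigger_all_cpu_backtrace(bool include_self)
    {
            nmi_trigger_all_cpu_backtrace(include_self, raise_backtrace_ipi);
    }

    /* and from the IPI/NMI handler: if (nmi_cpu_backtrace(regs)) return; */
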
#else
static inline bool trigger_all_cpu_backtrace(void)
{
@@ -67,7 +71,9 @@ extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
+extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
@@ -77,6 +83,19 @@ extern int proc_soft_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
+extern int proc_watchdog_cpumask(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int lockup_detector_suspend(void);
+extern void lockup_detector_resume(void);
+#else
+static inline int lockup_detector_suspend(void)
+{
+ return 0;
+}
+
+static inline void lockup_detector_resume(void)
+{
+}
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/kernel/include/linux/ntb.h b/kernel/include/linux/ntb.h
index 9ac1a62fc..f798e2afb 100644
--- a/kernel/include/linux/ntb.h
+++ b/kernel/include/linux/ntb.h
@@ -4,15 +4,20 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
* BSD LICENSE
*
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,49 +45,939 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Intel PCIe NTB Linux driver
+ * PCIe NTB Linux driver
*
* Contact Information:
- * Jon Mason <jon.mason@intel.com>
+ * Allen Hubbe <Allen.Hubbe@emc.com>
*/
-struct ntb_transport_qp;
+#ifndef _NTB_H_
+#define _NTB_H_
-struct ntb_client {
- struct device_driver driver;
- int (*probe)(struct pci_dev *pdev);
- void (*remove)(struct pci_dev *pdev);
+#include <linux/completion.h>
+#include <linux/device.h>
+
+struct ntb_client;
+struct ntb_dev;
+struct pci_dev;
+
+/**
+ * enum ntb_topo - NTB connection topology
+ * @NTB_TOPO_NONE: Topology is unknown or invalid.
+ * @NTB_TOPO_PRI: On primary side of local ntb.
+ * @NTB_TOPO_SEC: On secondary side of remote ntb.
+ * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
+ * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
+ */
+enum ntb_topo {
+ NTB_TOPO_NONE = -1,
+ NTB_TOPO_PRI,
+ NTB_TOPO_SEC,
+ NTB_TOPO_B2B_USD,
+ NTB_TOPO_B2B_DSD,
+};
+
+static inline int ntb_topo_is_b2b(enum ntb_topo topo)
+{
+ switch ((int)topo) {
+ case NTB_TOPO_B2B_USD:
+ case NTB_TOPO_B2B_DSD:
+ return 1;
+ }
+ return 0;
+}
+
+static inline char *ntb_topo_string(enum ntb_topo topo)
+{
+ switch (topo) {
+ case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
+ case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
+ case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
+ case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
+ case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
+ }
+ return "NTB_TOPO_INVALID";
+}
+
+/**
+ * enum ntb_speed - NTB link training speed
+ * @NTB_SPEED_AUTO: Request the max supported speed.
+ * @NTB_SPEED_NONE: Link is not trained to any speed.
+ * @NTB_SPEED_GEN1: Link is trained to gen1 speed.
+ * @NTB_SPEED_GEN2: Link is trained to gen2 speed.
+ * @NTB_SPEED_GEN3: Link is trained to gen3 speed.
+ */
+enum ntb_speed {
+ NTB_SPEED_AUTO = -1,
+ NTB_SPEED_NONE = 0,
+ NTB_SPEED_GEN1 = 1,
+ NTB_SPEED_GEN2 = 2,
+ NTB_SPEED_GEN3 = 3,
+};
+
+/**
+ * enum ntb_width - NTB link training width
+ * @NTB_WIDTH_AUTO: Request the max supported width.
+ * @NTB_WIDTH_NONE: Link is not trained to any width.
+ * @NTB_WIDTH_1: Link is trained to 1 lane width.
+ * @NTB_WIDTH_2: Link is trained to 2 lane width.
+ * @NTB_WIDTH_4: Link is trained to 4 lane width.
+ * @NTB_WIDTH_8: Link is trained to 8 lane width.
+ * @NTB_WIDTH_12: Link is trained to 12 lane width.
+ * @NTB_WIDTH_16: Link is trained to 16 lane width.
+ * @NTB_WIDTH_32: Link is trained to 32 lane width.
+ */
+enum ntb_width {
+ NTB_WIDTH_AUTO = -1,
+ NTB_WIDTH_NONE = 0,
+ NTB_WIDTH_1 = 1,
+ NTB_WIDTH_2 = 2,
+ NTB_WIDTH_4 = 4,
+ NTB_WIDTH_8 = 8,
+ NTB_WIDTH_12 = 12,
+ NTB_WIDTH_16 = 16,
+ NTB_WIDTH_32 = 32,
+};
+
+/**
+ * struct ntb_client_ops - ntb client operations
+ * @probe: Notify client of a new device.
+ * @remove: Notify client to remove a device.
+ */
+struct ntb_client_ops {
+ int (*probe)(struct ntb_client *client, struct ntb_dev *ntb);
+ void (*remove)(struct ntb_client *client, struct ntb_dev *ntb);
+};
+
+static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
+{
+ /* commented callbacks are not required: */
+ return
+ ops->probe &&
+ ops->remove &&
+ 1;
+}
+
+/**
+ * struct ntb_ctx_ops - ntb driver context operations
+ * @link_event: See ntb_link_event().
+ * @db_event: See ntb_db_event().
+ */
+struct ntb_ctx_ops {
+ void (*link_event)(void *ctx);
+ void (*db_event)(void *ctx, int db_vector);
+};
+
+static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
+{
+ /* commented callbacks are not required: */
+ return
+ /* ops->link_event && */
+ /* ops->db_event && */
+ 1;
+}
+
+/**
+ * struct ntb_dev_ops - ntb device operations
+ * @mw_count: See ntb_mw_count().
+ * @mw_get_range: See ntb_mw_get_range().
+ * @mw_set_trans: See ntb_mw_set_trans().
+ * @mw_clear_trans: See ntb_mw_clear_trans().
+ * @link_is_up: See ntb_link_is_up().
+ * @link_enable: See ntb_link_enable().
+ * @link_disable: See ntb_link_disable().
+ * @db_is_unsafe: See ntb_db_is_unsafe().
+ * @db_valid_mask: See ntb_db_valid_mask().
+ * @db_vector_count: See ntb_db_vector_count().
+ * @db_vector_mask: See ntb_db_vector_mask().
+ * @db_read: See ntb_db_read().
+ * @db_set: See ntb_db_set().
+ * @db_clear: See ntb_db_clear().
+ * @db_read_mask: See ntb_db_read_mask().
+ * @db_set_mask: See ntb_db_set_mask().
+ * @db_clear_mask: See ntb_db_clear_mask().
+ * @peer_db_addr: See ntb_peer_db_addr().
+ * @peer_db_read: See ntb_peer_db_read().
+ * @peer_db_set: See ntb_peer_db_set().
+ * @peer_db_clear: See ntb_peer_db_clear().
+ * @peer_db_read_mask: See ntb_peer_db_read_mask().
+ * @peer_db_set_mask: See ntb_peer_db_set_mask().
+ * @peer_db_clear_mask: See ntb_peer_db_clear_mask().
+ * @spad_is_unsafe: See ntb_spad_is_unsafe().
+ * @spad_count: See ntb_spad_count().
+ * @spad_read: See ntb_spad_read().
+ * @spad_write: See ntb_spad_write().
+ * @peer_spad_addr: See ntb_peer_spad_addr().
+ * @peer_spad_read: See ntb_peer_spad_read().
+ * @peer_spad_write: See ntb_peer_spad_write().
+ */
+struct ntb_dev_ops {
+ int (*mw_count)(struct ntb_dev *ntb);
+ int (*mw_get_range)(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size,
+ resource_size_t *align, resource_size_t *align_size);
+ int (*mw_set_trans)(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size);
+ int (*mw_clear_trans)(struct ntb_dev *ntb, int idx);
+
+ int (*link_is_up)(struct ntb_dev *ntb,
+ enum ntb_speed *speed, enum ntb_width *width);
+ int (*link_enable)(struct ntb_dev *ntb,
+ enum ntb_speed max_speed, enum ntb_width max_width);
+ int (*link_disable)(struct ntb_dev *ntb);
+
+ int (*db_is_unsafe)(struct ntb_dev *ntb);
+ u64 (*db_valid_mask)(struct ntb_dev *ntb);
+ int (*db_vector_count)(struct ntb_dev *ntb);
+ u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector);
+
+ u64 (*db_read)(struct ntb_dev *ntb);
+ int (*db_set)(struct ntb_dev *ntb, u64 db_bits);
+ int (*db_clear)(struct ntb_dev *ntb, u64 db_bits);
+
+ u64 (*db_read_mask)(struct ntb_dev *ntb);
+ int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
+ int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
+
+ int (*peer_db_addr)(struct ntb_dev *ntb,
+ phys_addr_t *db_addr, resource_size_t *db_size);
+ u64 (*peer_db_read)(struct ntb_dev *ntb);
+ int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
+ int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
+
+ u64 (*peer_db_read_mask)(struct ntb_dev *ntb);
+ int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
+ int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
+
+ int (*spad_is_unsafe)(struct ntb_dev *ntb);
+ int (*spad_count)(struct ntb_dev *ntb);
+
+ u32 (*spad_read)(struct ntb_dev *ntb, int idx);
+ int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+
+ int (*peer_spad_addr)(struct ntb_dev *ntb, int idx,
+ phys_addr_t *spad_addr);
+ u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx);
+ int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val);
};
-enum {
- NTB_LINK_DOWN = 0,
- NTB_LINK_UP,
+static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
+{
+ /* commented callbacks are not required: */
+ return
+ ops->mw_count &&
+ ops->mw_get_range &&
+ ops->mw_set_trans &&
+ /* ops->mw_clear_trans && */
+ ops->link_is_up &&
+ ops->link_enable &&
+ ops->link_disable &&
+ /* ops->db_is_unsafe && */
+ ops->db_valid_mask &&
+
+ /* both set, or both unset */
+ (!ops->db_vector_count == !ops->db_vector_mask) &&
+
+ ops->db_read &&
+ /* ops->db_set && */
+ ops->db_clear &&
+ /* ops->db_read_mask && */
+ ops->db_set_mask &&
+ ops->db_clear_mask &&
+ ops->peer_db_addr &&
+ /* ops->peer_db_read && */
+ ops->peer_db_set &&
+ /* ops->peer_db_clear && */
+ /* ops->peer_db_read_mask && */
+ /* ops->peer_db_set_mask && */
+ /* ops->peer_db_clear_mask && */
+ /* ops->spad_is_unsafe && */
+ ops->spad_count &&
+ ops->spad_read &&
+ ops->spad_write &&
+ ops->peer_spad_addr &&
+ /* ops->peer_spad_read && */
+ ops->peer_spad_write &&
+ 1;
+}
+
+/**
+ * struct ntb_client - client interested in ntb devices
+ * @drv: Linux driver object.
+ * @ops: See &ntb_client_ops.
+ */
+struct ntb_client {
+ struct device_driver drv;
+ const struct ntb_client_ops ops;
};
-int ntb_register_client(struct ntb_client *drvr);
-void ntb_unregister_client(struct ntb_client *drvr);
-int ntb_register_client_dev(char *device_name);
-void ntb_unregister_client_dev(char *device_name);
-
-struct ntb_queue_handlers {
- void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
- void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
- void (*event_handler)(void *data, int status);
+#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
+
+/**
+ * struct ntb_dev - ntb device
+ * @dev: Linux device object.
+ * @pdev: Pci device entry of the ntb.
+ * @topo: Detected topology of the ntb.
+ * @ops: See &ntb_dev_ops.
+ * @ctx: Driver context, associated via ntb_set_ctx().
+ * @ctx_ops: See &ntb_ctx_ops.
+ */
+struct ntb_dev {
+ struct device dev;
+ struct pci_dev *pdev;
+ enum ntb_topo topo;
+ const struct ntb_dev_ops *ops;
+ void *ctx;
+ const struct ntb_ctx_ops *ctx_ops;
+
+ /* private: */
+
+ /* synchronize setting, clearing, and calling ctx_ops */
+ spinlock_t ctx_lock;
+ /* block unregister until device is fully released */
+ struct completion released;
};
-unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
-unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
-struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, struct pci_dev *pdev,
- const struct ntb_queue_handlers *handlers);
-void ntb_transport_free_queue(struct ntb_transport_qp *qp);
-int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
- unsigned int len);
-int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
- unsigned int len);
-void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
-void ntb_transport_link_up(struct ntb_transport_qp *qp);
-void ntb_transport_link_down(struct ntb_transport_qp *qp);
-bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
+
+/**
+ * ntb_register_client() - register a client for interest in ntb devices
+ * @client: Client context.
+ *
+ * The client will be added to the list of clients interested in ntb devices.
+ * The client will be notified of any ntb devices that are not already
+ * associated with a client, or if ntb devices are registered later.
+ *
+ * Return: Zero if the client is registered, otherwise an error number.
+ */
+#define ntb_register_client(client) \
+ __ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME)
+
+int __ntb_register_client(struct ntb_client *client, struct module *mod,
+ const char *mod_name);
+
+/**
+ * ntb_unregister_client() - unregister a client for interest in ntb devices
+ * @client: Client context.
+ *
+ * The client will be removed from the list of clients interested in ntb
+ * devices. If any ntb devices are associated with the client, the client will
+ * be notified to remove those devices.
+ */
+void ntb_unregister_client(struct ntb_client *client);
+
+#define module_ntb_client(__ntb_client) \
+ module_driver(__ntb_client, ntb_register_client, \
+ ntb_unregister_client)
+
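
Putting the client pieces together, a minimal hypothetical client — not part of this patch — that accepts every device:

    static int my_probe(struct ntb_client *client, struct ntb_dev *ntb)
    {
            dev_dbg(&ntb->dev, "probed, topo %s\n", ntb_topo_string(ntb->topo));
            return 0;
    }

    static void my_remove(struct ntb_client *client, struct ntb_dev *ntb)
    {
    }

    static struct ntb_client my_client = {
            .ops = {
                    .probe  = my_probe,
                    .remove = my_remove,
            },
    };
    module_ntb_client(my_client);
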
+/**
+ * ntb_register_device() - register a ntb device
+ * @ntb: NTB device context.
+ *
+ * The device will be added to the list of ntb devices. If any clients are
+ * interested in ntb devices, each client will be notified of the ntb device,
+ * until at most one client accepts the device.
+ *
+ * Return: Zero if the device is registered, otherwise an error number.
+ */
+int ntb_register_device(struct ntb_dev *ntb);
+
+/**
+ * ntb_unregister_device() - unregister a ntb device
+ * @ntb: NTB device context.
+ *
+ * The device will be removed from the list of ntb devices. If the ntb device
+ * is associated with a client, the client will be notified to remove the
+ * device.
+ */
+void ntb_unregister_device(struct ntb_dev *ntb);
+
+/**
+ * ntb_set_ctx() - associate a driver context with an ntb device
+ * @ntb: NTB device context.
+ * @ctx: Driver context.
+ * @ctx_ops: Driver context operations.
+ *
+ * Associate a driver context and operations with a ntb device. The context is
+ * provided by the client driver, and the driver may associate a different
+ * context with each ntb device.
+ *
+ * Return: Zero if the context is associated, otherwise an error number.
+ */
+int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
+ const struct ntb_ctx_ops *ctx_ops);
+
+/**
+ * ntb_clear_ctx() - disassociate any driver context from an ntb device
+ * @ntb: NTB device context.
+ *
+ * Clear any association that may exist between a driver context and the ntb
+ * device.
+ */
+void ntb_clear_ctx(struct ntb_dev *ntb);
+
+/**
+ * ntb_link_event() - notify driver context of a change in link status
+ * @ntb: NTB device context.
+ *
+ * Notify the driver context that the link status may have changed. The driver
+ * should call ntb_link_is_up() to get the current status.
+ */
+void ntb_link_event(struct ntb_dev *ntb);
+
+/**
+ * ntb_db_event() - notify driver context of a doorbell event
+ * @ntb: NTB device context.
+ * @vector: Interrupt vector number.
+ *
+ * Notify the driver context of a doorbell event. If hardware supports
+ * multiple interrupt vectors for doorbells, the vector number indicates which
+ * vector received the interrupt. The vector number is relative to the first
+ * vector used for doorbells, starting at zero, and must be less than
+ * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
+ * doorbell bits need service, and ntb_db_vector_mask() to determine which of
+ * those bits are associated with the vector number.
+ */
+void ntb_db_event(struct ntb_dev *ntb, int vector);
+
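
The expected flow, sketched with hypothetical names: the client binds its state with ntb_set_ctx() in probe, and its db_event callback reads, filters by vector, and re-arms the doorbell bits:

    static void my_db_event(void *ctx, int vector)
    {
            struct my_ctx *mc = ctx;        /* hypothetical client state */
            u64 db_bits = ntb_db_read(mc->ntb) &
                          ntb_db_vector_mask(mc->ntb, vector);

            ntb_db_clear(mc->ntb, db_bits); /* re-arm for the next doorbell */
            /* ... service each set bit in db_bits ... */
    }

    static const struct ntb_ctx_ops my_ctx_ops = {
            .db_event = my_db_event,
    };

    /* in probe: */
    rc = ntb_set_ctx(ntb, mc, &my_ctx_ops);
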
+/**
+ * ntb_mw_count() - get the number of memory windows
+ * @ntb: NTB device context.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_mw_count(struct ntb_dev *ntb)
+{
+ return ntb->ops->mw_count(ntb);
+}
+
+/**
+ * ntb_mw_get_range() - get the range of a memory window
+ * @ntb: NTB device context.
+ * @idx: Memory window number.
+ * @base: OUT - the base address for mapping the memory window
+ * @size: OUT - the size for mapping the memory window
+ * @align: OUT - the base alignment for translating the memory window
+ * @align_size: OUT - the size alignment for translating the memory window
+ *
+ * Get the range of a memory window. NULL may be given for any output
+ * parameter if the value is not needed. The base and size may be used for
+ * mapping the memory window, to access the peer memory. The alignment and
+ * size may be used for translating the memory window, for the peer to access
+ * memory on the local system.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size,
+ resource_size_t *align, resource_size_t *align_size)
+{
+ return ntb->ops->mw_get_range(ntb, idx, base, size,
+ align, align_size);
+}
+
+/**
+ * ntb_mw_set_trans() - set the translation of a memory window
+ * @ntb: NTB device context.
+ * @idx: Memory window number.
+ * @addr: The dma address of local memory to expose to the peer.
+ * @size: The size of the local memory to expose to the peer.
+ *
+ * Set the translation of a memory window. The peer may access local memory
+ * through the window starting at the address, up to the size. The address
+ * must be aligned to the alignment specified by ntb_mw_get_range(). The size
+ * must be aligned to the size alignment specified by ntb_mw_get_range().
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ return ntb->ops->mw_set_trans(ntb, idx, addr, size);
+}
+
+/**
+ * ntb_mw_clear_trans() - clear the translation of a memory window
+ * @ntb: NTB device context.
+ * @idx: Memory window number.
+ *
+ * Clear the translation of a memory window. The peer may no longer access
+ * local memory through the window.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
+{
+ if (!ntb->ops->mw_clear_trans)
+ return ntb->ops->mw_set_trans(ntb, idx, 0, 0);
+
+ return ntb->ops->mw_clear_trans(ntb, idx);
+}
+
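
A sketch of typical memory-window setup against these calls, assuming a power-of-two base alignment and eliding error recovery:

    phys_addr_t mw_base;
    resource_size_t mw_size, mw_align, mw_align_size;
    dma_addr_t dma;
    void *buf;
    int rc;

    rc = ntb_mw_get_range(ntb, 0, &mw_base, &mw_size, &mw_align, &mw_align_size);
    if (rc)
            return rc;

    buf = dma_alloc_coherent(&ntb->pdev->dev, mw_size, &dma, GFP_KERNEL);
    if (!buf || (dma & (mw_align - 1)))
            return -ENOMEM;  /* real code would retry with an aligned buffer */

    rc = ntb_mw_set_trans(ntb, 0, dma, mw_size);
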
+/**
+ * ntb_link_is_up() - get the current ntb link state
+ * @ntb: NTB device context.
+ * @speed: OUT - The link speed expressed as PCIe generation number.
+ * @width: OUT - The link width expressed as the number of PCIe lanes.
+ *
+ * Get the current state of the ntb link. It is recommended to query the link
+ * state once after every link event. It is safe to query the link state in
+ * the context of the link event callback.
+ *
+ * Return: One if the link is up, zero if the link is down, otherwise a
+ * negative value indicating the error number.
+ */
+static inline int ntb_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed, enum ntb_width *width)
+{
+ return ntb->ops->link_is_up(ntb, speed, width);
+}
+
+/**
+ * ntb_link_enable() - enable the link on the secondary side of the ntb
+ * @ntb: NTB device context.
+ * @max_speed: The maximum link speed expressed as PCIe generation number.
+ * @max_width: The maximum link width expressed as the number of PCIe lanes.
+ *
+ * Enable the link on the secondary side of the ntb. This can only be done
+ * from the primary side of the ntb in primary or b2b topology. The ntb device
+ * should train the link to its maximum speed and width, or the requested speed
+ * and width, whichever is smaller, if supported.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return ntb->ops->link_enable(ntb, max_speed, max_width);
+}
+
+/**
+ * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * @ntb: NTB device context.
+ *
+ * Disable the link on the secondary side of the ntb. This can only be
+ * done from the primary side of the ntb in primary or b2b topology. The ntb
+ * device should disable the link. Returning from this call must indicate that
+ * a barrier has passed: no more writes will pass in either direction across
+ * the link, unless this call returns an error number.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_link_disable(struct ntb_dev *ntb)
+{
+ return ntb->ops->link_disable(ntb);
+}
+
+/**
+ * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
+ * @ntb: NTB device context.
+ *
+ * It is possible for some ntb hardware to be affected by errata. Hardware
+ * drivers can advise clients to avoid using doorbells. Clients may ignore
+ * this advice, though caution is recommended.
+ *
+ * Return: Zero if it is safe to use doorbells, or One if it is not safe.
+ */
+static inline int ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->db_is_unsafe)
+ return 0;
+
+ return ntb->ops->db_is_unsafe(ntb);
+}
+
+/**
+ * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ * @ntb: NTB device context.
+ *
+ * Hardware may support different number or arrangement of doorbell bits.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+ return ntb->ops->db_valid_mask(ntb);
+}
+
+/**
+ * ntb_db_vector_count() - get the number of doorbell interrupt vectors
+ * @ntb: NTB device context.
+ *
+ * Hardware may support different number of interrupt vectors.
+ *
+ * Return: The number of doorbell interrupt vectors.
+ */
+static inline int ntb_db_vector_count(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->db_vector_count)
+ return 1;
+
+ return ntb->ops->db_vector_count(ntb);
+}
+
+/**
+ * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
+ * @ntb: NTB device context.
+ * @vector: Doorbell vector number.
+ *
+ * Each interrupt vector may have a different number or arrangement of bits.
+ *
+ * Return: A mask of doorbell bits serviced by a vector.
+ */
+static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector)
+{
+ if (!ntb->ops->db_vector_mask)
+ return ntb_db_valid_mask(ntb);
+
+ return ntb->ops->db_vector_mask(ntb, vector);
+}
+
+/**
+ * ntb_db_read() - read the local doorbell register
+ * @ntb: NTB device context.
+ *
+ * Read the local doorbell register, and return the bits that are set.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+static inline u64 ntb_db_read(struct ntb_dev *ntb)
+{
+ return ntb->ops->db_read(ntb);
+}
+
+/**
+ * ntb_db_set() - set bits in the local doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to set.
+ *
+ * Set bits in the local doorbell register, which may generate a local doorbell
+ * interrupt. Bits that were already set must remain set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->db_set)
+ return -EINVAL;
+
+ return ntb->ops->db_set(ntb, db_bits);
+}
+
+/**
+ * ntb_db_clear() - clear bits in the local doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell register, arming the bits for the next
+ * doorbell.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->db_clear(ntb, db_bits);
+}
+
+/**
+ * ntb_db_read_mask() - read the local doorbell mask
+ * @ntb: NTB device context.
+ *
+ * Read the local doorbell mask register, and return the bits that are set.
+ *
+ * This is unusual, though hardware is likely to support it.
+ *
+ * Return: The bits currently set in the local doorbell mask register.
+ */
+static inline u64 ntb_db_read_mask(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->db_read_mask)
+ return 0;
+
+ return ntb->ops->db_read_mask(ntb);
+}
+
+/**
+ * ntb_db_set_mask() - set bits in the local doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell mask bits to set.
+ *
+ * Set bits in the local doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits. Bits that were already set
+ * must remain set.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->db_set_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_db_clear_mask() - clear bits in the local doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits. If a doorbell bit is already
+ * set at the time the mask is cleared, and the corresponding mask bit is
+ * changed from set to clear, then the ntb driver must ensure that
+ * ntb_db_event() is called. If the hardware does not generate the interrupt
+ * on clearing the mask bit, then the driver must call ntb_db_event() anyway.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->db_clear_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_addr() - address and size of the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_addr: OUT - The address of the peer doorbell register.
+ * @db_size: OUT - The number of bytes to write the peer doorbell register.
+ *
+ * Return the address of the peer doorbell register. This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ * The drivers may wish to ring the peer doorbell at the completion of memory
+ * copy operations. For efficiency, and to simplify ordering of operations
+ * between the dma memory copies and the ringing doorbell, the driver may
+ * append one additional dma memory copy with the doorbell register as the
+ * destination, after the memory copy operations.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
+ phys_addr_t *db_addr,
+ resource_size_t *db_size)
+{
+ return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+}
+
+/**
+ * ntb_peer_db_read() - read the peer doorbell register
+ * @ntb: NTB device context.
+ *
+ * Read the peer doorbell register, and return the bits that are set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: The bits currently set in the peer doorbell register.
+ */
+static inline u64 ntb_peer_db_read(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->peer_db_read)
+ return 0;
+
+ return ntb->ops->peer_db_read(ntb);
+}
+
+/**
+ * ntb_peer_db_set() - set bits in the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to set.
+ *
+ * Set bits in the peer doorbell register, which may generate a peer doorbell
+ * interrupt. Bits that were already set must remain set.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->peer_db_set(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the peer doorbell register, arming the bits for the next
+ * doorbell.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->peer_db_clear)
+ return -EINVAL;
+
+ return ntb->ops->peer_db_clear(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_read_mask() - read the peer doorbell mask
+ * @ntb: NTB device context.
+ *
+ * Read the peer doorbell mask register, and return the bits that are set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: The bits currently set in the peer doorbell mask register.
+ */
+static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->peer_db_read_mask)
+ return 0;
+
+ return ntb->ops->peer_db_read_mask(ntb);
+}
+
+/**
+ * ntb_peer_db_set_mask() - set bits in the peer doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell mask bits to set.
+ *
+ * Set bits in the peer doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits. Bits that were already set
+ * must remain set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->peer_db_set_mask)
+ return -EINVAL;
+
+ return ntb->ops->peer_db_set_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the peer doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits. If the hardware does not
+ * generate the interrupt on clearing the mask bit, then the driver should not
+ * implement this function!
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->peer_db_clear_mask)
+ return -EINVAL;
+
+ return ntb->ops->peer_db_clear_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads
+ * @ntb: NTB device context.
+ *
+ * It is possible for some ntb hardware to be affected by errata. Hardware
+ * drivers can advise clients to avoid using scratchpads. Clients may ignore
+ * this advice, though caution is recommended.
+ *
+ * Return: Zero if it is safe to use scratchpads, or One if it is not safe.
+ */
+static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->spad_is_unsafe)
+ return 0;
+
+ return ntb->ops->spad_is_unsafe(ntb);
+}
+
+/**
+ * ntb_spad_count() - get the number of scratchpads
+ * @ntb: NTB device context.
+ *
+ * Hardware and topology may support a different number of scratchpads.
+ *
+ * Return: the number of scratchpads.
+ */
+static inline int ntb_spad_count(struct ntb_dev *ntb)
+{
+ return ntb->ops->spad_count(ntb);
+}
+
+/**
+ * ntb_spad_read() - read the local scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ *
+ * Read the local scratchpad register, and return the value.
+ *
+ * Return: The value of the local scratchpad register.
+ */
+static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+ return ntb->ops->spad_read(ntb, idx);
+}
+
+/**
+ * ntb_spad_write() - write the local scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ * @val: Scratchpad value.
+ *
+ * Write the value to the local scratchpad register.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+ return ntb->ops->spad_write(ntb, idx, val);
+}
+
+/**
+ * ntb_peer_spad_addr() - address of the peer scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ * @spad_addr: OUT - The address of the peer scratchpad register.
+ *
+ * Return the address of the peer scratchpad register. This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *spad_addr)
+{
+ return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
+}
+
+/**
+ * ntb_peer_spad_read() - read the peer scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ *
+ * Read the peer scratchpad register, and return the value.
+ *
+ * Return: The value of the peer scratchpad register.
+ */
+static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+{
+ return ntb->ops->peer_spad_read(ntb, idx);
+}
+
+/**
+ * ntb_peer_spad_write() - write the peer scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ * @val: Scratchpad value.
+ *
+ * Write the value to the peer scratchpad register.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+ return ntb->ops->peer_spad_write(ntb, idx, val);
+}
+
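
A common handshake pattern with these scratchpad calls (sketch; the index and value names are illustrative): after a link event, each side writes a version word into the peer's scratchpad, then reads its own local copy to see the peer's answer:

    #define MY_SPAD_VERSION 0

    ntb_peer_spad_write(ntb, MY_SPAD_VERSION, MY_PROTO_VERSION);

    if (ntb_spad_read(ntb, MY_SPAD_VERSION) == MY_PROTO_VERSION)
            /* both sides agree; continue with setup */;
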
+#endif
diff --git a/kernel/include/linux/ntb_transport.h b/kernel/include/linux/ntb_transport.h
new file mode 100644
index 000000000..7243eb98a
--- /dev/null
+++ b/kernel/include/linux/ntb_transport.h
@@ -0,0 +1,86 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Transport Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+struct ntb_transport_qp;
+
+struct ntb_transport_client {
+ struct device_driver driver;
+ int (*probe)(struct device *client_dev);
+ void (*remove)(struct device *client_dev);
+};
+
+int ntb_transport_register_client(struct ntb_transport_client *drvr);
+void ntb_transport_unregister_client(struct ntb_transport_client *drvr);
+int ntb_transport_register_client_dev(char *device_name);
+void ntb_transport_unregister_client_dev(char *device_name);
+
+struct ntb_queue_handlers {
+ void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ void (*event_handler)(void *data, int status);
+};
+
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct device *client_dev,
+ const struct ntb_queue_handlers *handlers);
+void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len);
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len);
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
+void ntb_transport_link_up(struct ntb_transport_qp *qp);
+void ntb_transport_link_down(struct ntb_transport_qp *qp);
+bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
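
A hypothetical transport client against this API (sketch): one queue created in probe, with received buffers reposted from the rx handler:

    static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
            /* 'data' holds 'len' received bytes; repost the buffer */
            ntb_transport_rx_enqueue(qp, data, data, len);
    }

    static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
            /* transmission of 'data' completed */
    }

    static const struct ntb_queue_handlers my_handlers = {
            .rx_handler = my_rx,
            .tx_handler = my_tx,
    };

    /* in probe(client_dev): */
    qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
    ntb_transport_link_up(qp);
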
diff --git a/kernel/include/linux/nvme.h b/kernel/include/linux/nvme.h
index 8dbd05e70..3af5f454c 100644
--- a/kernel/include/linux/nvme.h
+++ b/kernel/include/linux/nvme.h
@@ -15,10 +15,7 @@
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
-#include <uapi/linux/nvme.h>
-#include <linux/pci.h>
-#include <linux/kref.h>
-#include <linux/blk-mq.h>
+#include <linux/types.h>
struct nvme_bar {
__u64 cap; /* Controller Capabilities */
@@ -28,18 +25,32 @@ struct nvme_bar {
__u32 cc; /* Controller Configuration */
__u32 rsvd1; /* Reserved */
__u32 csts; /* Controller Status */
- __u32 rsvd2; /* Reserved */
+ __u32 nssr; /* Subsystem Reset */
__u32 aqa; /* Admin Queue Attributes */
__u64 asq; /* Admin SQ Base Address */
__u64 acq; /* Admin CQ Base Address */
+ __u32 cmbloc; /* Controller Memory Buffer Location */
+ __u32 cmbsz; /* Controller Memory Buffer Size */
};
#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
+
+#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
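
How a driver might decode the new CMB registers (sketch; field semantics follow the NVMe 1.2 spec, where CMBSZ.SZU selects 4 KiB x 16^SZU size units):

    u32 cmbloc = readl(&bar->cmbloc);
    u32 cmbsz  = readl(&bar->cmbsz);
    int unit_shift = 12 + 4 * NVME_CMB_SZU(cmbsz);  /* log2 of the size unit */

    u64 cmb_size   = (u64)NVME_CMB_SZ(cmbsz) << unit_shift;
    u64 cmb_offset = (u64)NVME_CMB_OFST(cmbloc) << unit_shift;
    int cmb_bar    = NVME_CMB_BIR(cmbloc);  /* which PCI BAR hosts the CMB */
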
enum {
NVME_CC_ENABLE = 1 << 0,
NVME_CC_CSS_NVM = 0 << 4,
@@ -55,125 +66,535 @@ enum {
NVME_CC_IOCQES = 4 << 20,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
+ NVME_CSTS_NSSRO = 1 << 4,
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
NVME_CSTS_SHST_MASK = 3 << 2,
};
-extern unsigned char nvme_io_timeout;
-#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
+struct nvme_id_power_state {
+ __le16 max_power; /* centiwatts */
+ __u8 rsvd2;
+ __u8 flags;
+ __le32 entry_lat; /* microseconds */
+ __le32 exit_lat; /* microseconds */
+ __u8 read_tput;
+ __u8 read_lat;
+ __u8 write_tput;
+ __u8 write_lat;
+ __le16 idle_power;
+ __u8 idle_scale;
+ __u8 rsvd19;
+ __le16 active_power;
+ __u8 active_work_scale;
+ __u8 rsvd23[9];
+};
-/*
- * Represents an NVM Express device. Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
- struct list_head node;
- struct nvme_queue **queues;
- struct request_queue *admin_q;
- struct blk_mq_tag_set tagset;
- struct blk_mq_tag_set admin_tagset;
- u32 __iomem *dbs;
- struct pci_dev *pci_dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
- int instance;
- unsigned queue_count;
- unsigned online_queues;
- unsigned max_qid;
- int q_depth;
- u32 db_stride;
- u32 ctrl_config;
- struct msix_entry *entry;
- struct nvme_bar __iomem *bar;
- struct list_head namespaces;
- struct kref kref;
- struct device *device;
- work_func_t reset_workfn;
- struct work_struct reset_work;
- struct work_struct probe_work;
- char name[12];
- char serial[20];
- char model[40];
- char firmware_rev[8];
- u32 max_hw_sectors;
- u32 stripe_size;
- u32 page_size;
- u16 oncs;
- u16 abort_limit;
- u8 event_limit;
- u8 vwc;
+enum {
+ NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
+ NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
- struct list_head list;
+struct nvme_id_ctrl {
+ __le16 vid;
+ __le16 ssvid;
+ char sn[20];
+ char mn[40];
+ char fr[8];
+ __u8 rab;
+ __u8 ieee[3];
+ __u8 mic;
+ __u8 mdts;
+ __le16 cntlid;
+ __le32 ver;
+ __u8 rsvd84[172];
+ __le16 oacs;
+ __u8 acl;
+ __u8 aerl;
+ __u8 frmw;
+ __u8 lpa;
+ __u8 elpe;
+ __u8 npss;
+ __u8 avscc;
+ __u8 apsta;
+ __le16 wctemp;
+ __le16 cctemp;
+ __u8 rsvd270[242];
+ __u8 sqes;
+ __u8 cqes;
+ __u8 rsvd514[2];
+ __le32 nn;
+ __le16 oncs;
+ __le16 fuses;
+ __u8 fna;
+ __u8 vwc;
+ __le16 awun;
+ __le16 awupf;
+ __u8 nvscc;
+ __u8 rsvd531;
+ __le16 acwu;
+ __u8 rsvd534[2];
+ __le32 sgls;
+ __u8 rsvd540[1508];
+ struct nvme_id_power_state psd[32];
+ __u8 vs[1024];
+};
- struct nvme_dev *dev;
- struct request_queue *queue;
- struct gendisk *disk;
+enum {
+ NVME_CTRL_ONCS_COMPARE = 1 << 0,
+ NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
+ NVME_CTRL_ONCS_DSM = 1 << 2,
+ NVME_CTRL_VWC_PRESENT = 1 << 0,
+};
- unsigned ns_id;
- int lba_shift;
- u16 ms;
- bool ext;
- u8 pi_type;
- u64 mode_select_num_blocks;
- u32 mode_select_block_len;
+struct nvme_lbaf {
+ __le16 ms;
+ __u8 ds;
+ __u8 rp;
};
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
- unsigned long private; /* For the use of the submitter of the I/O */
- int npages; /* In the PRP list. 0 means small pool in use */
- int offset; /* Of PRP list */
- int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
- dma_addr_t first_dma;
- struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
- struct scatterlist sg[0];
-};
-
-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
-{
- return (sector >> (ns->lba_shift - 9));
-}
-
-/**
- * nvme_free_iod - frees an nvme_iod
- * @dev: The device that the I/O was submitted to
- * @iod: The memory to free
- */
-void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
-struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
- unsigned long addr, unsigned length);
-void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
- struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
- struct nvme_command *, u32 *);
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
-int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
- u32 *result);
-int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
- dma_addr_t dma_addr);
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result);
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result);
-
-struct sg_io_hdr;
-
-int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
-int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
-int nvme_sg_get_version_num(int __user *ip);
+struct nvme_id_ns {
+ __le64 nsze;
+ __le64 ncap;
+ __le64 nuse;
+ __u8 nsfeat;
+ __u8 nlbaf;
+ __u8 flbas;
+ __u8 mc;
+ __u8 dpc;
+ __u8 dps;
+ __u8 nmic;
+ __u8 rescap;
+ __u8 fpi;
+ __u8 rsvd33;
+ __le16 nawun;
+ __le16 nawupf;
+ __le16 nacwu;
+ __le16 nabsn;
+ __le16 nabo;
+ __le16 nabspf;
+ __u16 rsvd46;
+ __le64 nvmcap[2];
+ __u8 rsvd64[40];
+ __u8 nguid[16];
+ __u8 eui64[8];
+ struct nvme_lbaf lbaf[16];
+ __u8 rsvd192[192];
+ __u8 vs[3712];
+};
+
+enum {
+ NVME_NS_FEAT_THIN = 1 << 0,
+ NVME_NS_FLBAS_LBA_MASK = 0xf,
+ NVME_NS_FLBAS_META_EXT = 0x10,
+ NVME_LBAF_RP_BEST = 0,
+ NVME_LBAF_RP_BETTER = 1,
+ NVME_LBAF_RP_GOOD = 2,
+ NVME_LBAF_RP_DEGRADED = 3,
+ NVME_NS_DPC_PI_LAST = 1 << 4,
+ NVME_NS_DPC_PI_FIRST = 1 << 3,
+ NVME_NS_DPC_PI_TYPE3 = 1 << 2,
+ NVME_NS_DPC_PI_TYPE2 = 1 << 1,
+ NVME_NS_DPC_PI_TYPE1 = 1 << 0,
+ NVME_NS_DPS_PI_FIRST = 1 << 3,
+ NVME_NS_DPS_PI_MASK = 0x7,
+ NVME_NS_DPS_PI_TYPE1 = 1,
+ NVME_NS_DPS_PI_TYPE2 = 2,
+ NVME_NS_DPS_PI_TYPE3 = 3,
+};
+
+struct nvme_smart_log {
+ __u8 critical_warning;
+ __u8 temperature[2];
+ __u8 avail_spare;
+ __u8 spare_thresh;
+ __u8 percent_used;
+ __u8 rsvd6[26];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 host_reads[16];
+ __u8 host_writes[16];
+ __u8 ctrl_busy_time[16];
+ __u8 power_cycles[16];
+ __u8 power_on_hours[16];
+ __u8 unsafe_shutdowns[16];
+ __u8 media_errors[16];
+ __u8 num_err_log_entries[16];
+ __le32 warning_temp_time;
+ __le32 critical_comp_time;
+ __le16 temp_sensor[8];
+ __u8 rsvd216[296];
+};
+
+enum {
+ NVME_SMART_CRIT_SPARE = 1 << 0,
+ NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
+ NVME_SMART_CRIT_RELIABILITY = 1 << 2,
+ NVME_SMART_CRIT_MEDIA = 1 << 3,
+ NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
+};
+
+enum {
+ NVME_AER_NOTICE_NS_CHANGED = 0x0002,
+};
+
+struct nvme_lba_range_type {
+ __u8 type;
+ __u8 attributes;
+ __u8 rsvd2[14];
+ __u64 slba;
+ __u64 nlb;
+ __u8 guid[16];
+ __u8 rsvd48[16];
+};
+
+enum {
+ NVME_LBART_TYPE_FS = 0x01,
+ NVME_LBART_TYPE_RAID = 0x02,
+ NVME_LBART_TYPE_CACHE = 0x03,
+ NVME_LBART_TYPE_SWAP = 0x04,
+
+ NVME_LBART_ATTRIB_TEMP = 1 << 0,
+ NVME_LBART_ATTRIB_HIDE = 1 << 1,
+};
+
+struct nvme_reservation_status {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[13];
+ struct {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 resv3[5];
+ __le64 hostid;
+ __le64 rkey;
+ } regctl_ds[];
+};
+
+/* I/O commands */
+
+enum nvme_opcode {
+ nvme_cmd_flush = 0x00,
+ nvme_cmd_write = 0x01,
+ nvme_cmd_read = 0x02,
+ nvme_cmd_write_uncor = 0x04,
+ nvme_cmd_compare = 0x05,
+ nvme_cmd_write_zeroes = 0x08,
+ nvme_cmd_dsm = 0x09,
+ nvme_cmd_resv_register = 0x0d,
+ nvme_cmd_resv_report = 0x0e,
+ nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_resv_release = 0x15,
+};
+
+struct nvme_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le32 cdw2[2];
+ __le64 metadata;
+ __le64 prp1;
+ __le64 prp2;
+ __le32 cdw10[6];
+};
+
+struct nvme_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2;
+ __le64 metadata;
+ __le64 prp1;
+ __le64 prp2;
+ __le64 slba;
+ __le16 length;
+ __le16 control;
+ __le32 dsmgmt;
+ __le32 reftag;
+ __le16 apptag;
+ __le16 appmask;
+};
+
+enum {
+ NVME_RW_LR = 1 << 15,
+ NVME_RW_FUA = 1 << 14,
+ NVME_RW_DSM_FREQ_UNSPEC = 0,
+ NVME_RW_DSM_FREQ_TYPICAL = 1,
+ NVME_RW_DSM_FREQ_RARE = 2,
+ NVME_RW_DSM_FREQ_READS = 3,
+ NVME_RW_DSM_FREQ_WRITES = 4,
+ NVME_RW_DSM_FREQ_RW = 5,
+ NVME_RW_DSM_FREQ_ONCE = 6,
+ NVME_RW_DSM_FREQ_PREFETCH = 7,
+ NVME_RW_DSM_FREQ_TEMP = 8,
+ NVME_RW_DSM_LATENCY_NONE = 0 << 4,
+ NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
+ NVME_RW_DSM_LATENCY_NORM = 2 << 4,
+ NVME_RW_DSM_LATENCY_LOW = 3 << 4,
+ NVME_RW_DSM_SEQ_REQ = 1 << 6,
+ NVME_RW_DSM_COMPRESSED = 1 << 7,
+ NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
+ NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
+ NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
+ NVME_RW_PRINFO_PRACT = 1 << 13,
+};
+
+struct nvme_dsm_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[2];
+ __le64 prp1;
+ __le64 prp2;
+ __le32 nr;
+ __le32 attributes;
+ __u32 rsvd12[4];
+};
+
+enum {
+ NVME_DSMGMT_IDR = 1 << 0,
+ NVME_DSMGMT_IDW = 1 << 1,
+ NVME_DSMGMT_AD = 1 << 2,
+};
+
+struct nvme_dsm_range {
+ __le32 cattr;
+ __le32 nlb;
+ __le64 slba;
+};
+
+/* Admin commands */
+
+enum nvme_admin_opcode {
+ nvme_admin_delete_sq = 0x00,
+ nvme_admin_create_sq = 0x01,
+ nvme_admin_get_log_page = 0x02,
+ nvme_admin_delete_cq = 0x04,
+ nvme_admin_create_cq = 0x05,
+ nvme_admin_identify = 0x06,
+ nvme_admin_abort_cmd = 0x08,
+ nvme_admin_set_features = 0x09,
+ nvme_admin_get_features = 0x0a,
+ nvme_admin_async_event = 0x0c,
+ nvme_admin_activate_fw = 0x10,
+ nvme_admin_download_fw = 0x11,
+ nvme_admin_format_nvm = 0x80,
+ nvme_admin_security_send = 0x81,
+ nvme_admin_security_recv = 0x82,
+};
+
+enum {
+ NVME_QUEUE_PHYS_CONTIG = (1 << 0),
+ NVME_CQ_IRQ_ENABLED = (1 << 1),
+ NVME_SQ_PRIO_URGENT = (0 << 1),
+ NVME_SQ_PRIO_HIGH = (1 << 1),
+ NVME_SQ_PRIO_MEDIUM = (2 << 1),
+ NVME_SQ_PRIO_LOW = (3 << 1),
+ NVME_FEAT_ARBITRATION = 0x01,
+ NVME_FEAT_POWER_MGMT = 0x02,
+ NVME_FEAT_LBA_RANGE = 0x03,
+ NVME_FEAT_TEMP_THRESH = 0x04,
+ NVME_FEAT_ERR_RECOVERY = 0x05,
+ NVME_FEAT_VOLATILE_WC = 0x06,
+ NVME_FEAT_NUM_QUEUES = 0x07,
+ NVME_FEAT_IRQ_COALESCE = 0x08,
+ NVME_FEAT_IRQ_CONFIG = 0x09,
+ NVME_FEAT_WRITE_ATOMIC = 0x0a,
+ NVME_FEAT_ASYNC_EVENT = 0x0b,
+ NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_SW_PROGRESS = 0x80,
+ NVME_FEAT_HOST_ID = 0x81,
+ NVME_FEAT_RESV_MASK = 0x82,
+ NVME_FEAT_RESV_PERSIST = 0x83,
+ NVME_LOG_ERROR = 0x01,
+ NVME_LOG_SMART = 0x02,
+ NVME_LOG_FW_SLOT = 0x03,
+ NVME_LOG_RESERVATION = 0x80,
+ NVME_FWACT_REPL = (0 << 3),
+ NVME_FWACT_REPL_ACTV = (1 << 3),
+ NVME_FWACT_ACTV = (2 << 3),
+};
+
+struct nvme_identify {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[2];
+ __le64 prp1;
+ __le64 prp2;
+ __le32 cns;
+ __u32 rsvd11[5];
+};
+
+struct nvme_features {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[2];
+ __le64 prp1;
+ __le64 prp2;
+ __le32 fid;
+ __le32 dword11;
+ __u32 rsvd12[4];
+};
+
+struct nvme_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct nvme_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct nvme_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct nvme_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 sqid;
+ __u16 cid;
+ __u32 rsvd11[5];
+};
+
+struct nvme_download_firmware {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __le64 prp2;
+ __le32 numd;
+ __le32 offset;
+ __u32 rsvd12[4];
+};
+
+struct nvme_format_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[4];
+ __le32 cdw10;
+ __u32 rsvd11[5];
+};
+
+struct nvme_command {
+ union {
+ struct nvme_common_command common;
+ struct nvme_rw_command rw;
+ struct nvme_identify identify;
+ struct nvme_features features;
+ struct nvme_create_cq create_cq;
+ struct nvme_create_sq create_sq;
+ struct nvme_delete_queue delete_queue;
+ struct nvme_download_firmware dlfw;
+ struct nvme_format_cmd format;
+ struct nvme_dsm_cmd dsm;
+ struct nvme_abort_cmd abort;
+ };
+};
+
+enum {
+ NVME_SC_SUCCESS = 0x0,
+ NVME_SC_INVALID_OPCODE = 0x1,
+ NVME_SC_INVALID_FIELD = 0x2,
+ NVME_SC_CMDID_CONFLICT = 0x3,
+ NVME_SC_DATA_XFER_ERROR = 0x4,
+ NVME_SC_POWER_LOSS = 0x5,
+ NVME_SC_INTERNAL = 0x6,
+ NVME_SC_ABORT_REQ = 0x7,
+ NVME_SC_ABORT_QUEUE = 0x8,
+ NVME_SC_FUSED_FAIL = 0x9,
+ NVME_SC_FUSED_MISSING = 0xa,
+ NVME_SC_INVALID_NS = 0xb,
+ NVME_SC_CMD_SEQ_ERROR = 0xc,
+ NVME_SC_SGL_INVALID_LAST = 0xd,
+ NVME_SC_SGL_INVALID_COUNT = 0xe,
+ NVME_SC_SGL_INVALID_DATA = 0xf,
+ NVME_SC_SGL_INVALID_METADATA = 0x10,
+ NVME_SC_SGL_INVALID_TYPE = 0x11,
+ NVME_SC_LBA_RANGE = 0x80,
+ NVME_SC_CAP_EXCEEDED = 0x81,
+ NVME_SC_NS_NOT_READY = 0x82,
+ NVME_SC_RESERVATION_CONFLICT = 0x83,
+ NVME_SC_CQ_INVALID = 0x100,
+ NVME_SC_QID_INVALID = 0x101,
+ NVME_SC_QUEUE_SIZE = 0x102,
+ NVME_SC_ABORT_LIMIT = 0x103,
+ NVME_SC_ABORT_MISSING = 0x104,
+ NVME_SC_ASYNC_LIMIT = 0x105,
+ NVME_SC_FIRMWARE_SLOT = 0x106,
+ NVME_SC_FIRMWARE_IMAGE = 0x107,
+ NVME_SC_INVALID_VECTOR = 0x108,
+ NVME_SC_INVALID_LOG_PAGE = 0x109,
+ NVME_SC_INVALID_FORMAT = 0x10a,
+ NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_INVALID_QUEUE = 0x10c,
+ NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
+ NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
+ NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
+ NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+ NVME_SC_BAD_ATTRIBUTES = 0x180,
+ NVME_SC_INVALID_PI = 0x181,
+ NVME_SC_READ_ONLY = 0x182,
+ NVME_SC_WRITE_FAULT = 0x280,
+ NVME_SC_READ_ERROR = 0x281,
+ NVME_SC_GUARD_CHECK = 0x282,
+ NVME_SC_APPTAG_CHECK = 0x283,
+ NVME_SC_REFTAG_CHECK = 0x284,
+ NVME_SC_COMPARE_FAILED = 0x285,
+ NVME_SC_ACCESS_DENIED = 0x286,
+ NVME_SC_DNR = 0x4000,
+};
+
+struct nvme_completion {
+ __le32 result; /* Used by admin commands to return data */
+ __u32 rsvd;
+ __le16 sq_head; /* how much of this queue may be reclaimed */
+ __le16 sq_id; /* submission queue that generated this entry */
+ __u16 command_id; /* of the command which completed */
+ __le16 status; /* did the command fail, and if so, why? */
+};
+
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
#endif /* _LINUX_NVME_H */
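The anonymous union above lets a driver build any submission entry in place and issue one fixed-size command. A minimal sketch of filling a Create I/O Completion Queue entry, assuming the nvme_admin_create_cq opcode and the NVME_QUEUE_PHYS_CONTIG/NVME_CQ_IRQ_ENABLED flag bits defined earlier in this header, with dma_addr, qid, depth and vector as illustrative locals (qsize is 0's-based per the NVMe spec):

    struct nvme_command c;

    memset(&c, 0, sizeof(c));
    c.create_cq.opcode = nvme_admin_create_cq;
    c.create_cq.prp1 = cpu_to_le64(dma_addr);      /* queue memory */
    c.create_cq.cqid = cpu_to_le16(qid);
    c.create_cq.qsize = cpu_to_le16(depth - 1);    /* 0's-based */
    c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG |
                                       NVME_CQ_IRQ_ENABLED);
    c.create_cq.irq_vector = cpu_to_le16(vector);
    /* submit on the admin queue; the result arrives as an nvme_completion */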
diff --git a/kernel/include/linux/nvmem-consumer.h b/kernel/include/linux/nvmem-consumer.h
new file mode 100644
index 000000000..9bb77d3ed
--- /dev/null
+++ b/kernel/include/linux/nvmem-consumer.h
@@ -0,0 +1,157 @@
+/*
+ * nvmem framework consumer.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_CONSUMER_H
+#define _LINUX_NVMEM_CONSUMER_H
+
+struct device;
+struct device_node;
+/* consumer cookie */
+struct nvmem_cell;
+struct nvmem_device;
+
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+/* Cell based interface */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+
+/* direct nvmem device read/write interface */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+ const char *name);
+void nvmem_device_put(struct nvmem_device *nvmem);
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
+int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
+ size_t bytes, void *buf);
+int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
+ size_t bytes, void *buf);
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+
+#else
+
+static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_nvmem_cell_put(struct device *dev,
+ struct nvmem_cell *cell)
+{
+}
+
+static inline void nvmem_cell_put(struct nvmem_cell *cell)
+{
+}
+
+static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_cell_write(struct nvmem_cell *cell,
+ void *buf, size_t len)
+{
+ return -ENOSYS;
+}
+
+static inline struct nvmem_device *nvmem_device_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void nvmem_device_put(struct nvmem_device *nvmem)
+{
+}
+
+static inline void devm_nvmem_device_put(struct device *dev,
+ struct nvmem_device *nvmem)
+{
+}
+
+static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_read(struct nvmem_device *nvmem,
+ unsigned int offset, size_t bytes,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_write(struct nvmem_device *nvmem,
+ unsigned int offset, size_t bytes,
+ void *buf)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name);
+struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ const char *name);
+#else
+static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_NVMEM && CONFIG_OF */
+
+#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
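A minimal consumer sketch, assuming the device's DT node references a cell named "mac-address" (the cell name is illustrative); nvmem_cell_read() returns a kmalloc'd buffer that the caller must free:

    struct nvmem_cell *cell;
    size_t len;
    u8 *mac;

    cell = nvmem_cell_get(dev, "mac-address");
    if (IS_ERR(cell))
        return PTR_ERR(cell);

    mac = nvmem_cell_read(cell, &len);    /* kmalloc'd buffer */
    nvmem_cell_put(cell);
    if (IS_ERR(mac))
        return PTR_ERR(mac);

    /* ... consume len bytes at mac ... */
    kfree(mac);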
diff --git a/kernel/include/linux/nvmem-provider.h b/kernel/include/linux/nvmem-provider.h
new file mode 100644
index 000000000..0b68caff1
--- /dev/null
+++ b/kernel/include/linux/nvmem-provider.h
@@ -0,0 +1,47 @@
+/*
+ * nvmem framework provider.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_PROVIDER_H
+#define _LINUX_NVMEM_PROVIDER_H
+
+struct nvmem_device;
+struct nvmem_cell_info;
+
+struct nvmem_config {
+ struct device *dev;
+ const char *name;
+ int id;
+ struct module *owner;
+ const struct nvmem_cell_info *cells;
+ int ncells;
+ bool read_only;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
+int nvmem_unregister(struct nvmem_device *nvmem);
+
+#else
+
+static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_unregister(struct nvmem_device *nvmem)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_NVMEM */
+
+#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
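A minimal provider sketch; in this first version of the framework the backing storage is assumed to be reachable through the regmap already attached to the device, and the names below are illustrative:

    static struct nvmem_config econfig = {
        .name = "soc-eeprom",
        .id = -1,
        .read_only = true,
    };

    static int eeprom_probe(struct platform_device *pdev)
    {
        struct nvmem_device *nvmem;

        econfig.dev = &pdev->dev;
        econfig.owner = THIS_MODULE;
        nvmem = nvmem_register(&econfig);
        if (IS_ERR(nvmem))
            return PTR_ERR(nvmem);

        platform_set_drvdata(pdev, nvmem);
        return 0;    /* remove path calls nvmem_unregister() */
    }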
diff --git a/kernel/include/linux/nx842.h b/kernel/include/linux/nx842.h
deleted file mode 100644
index a4d324c64..000000000
--- a/kernel/include/linux/nx842.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NX842_H__
-#define __NX842_H__
-
-int nx842_get_workmem_size(void);
-int nx842_get_workmem_size_aligned(void);
-int nx842_compress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-
-#endif
diff --git a/kernel/include/linux/of.h b/kernel/include/linux/of.h
index 8135d507d..dd10626a6 100644
--- a/kernel/include/linux/of.h
+++ b/kernel/include/linux/of.h
@@ -120,6 +120,14 @@ extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
+/* flag descriptions (need to be visible even when !CONFIG_OF) */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+#define OF_DETACHED 2 /* node has been detached from the device tree */
+#define OF_POPULATED 3 /* device already created for the node */
+#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
+
+#define OF_BAD_ADDR ((u64)-1)
+
#ifdef CONFIG_OF
void of_core_init(void);
@@ -128,9 +136,10 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
return fwnode && fwnode->type == FWNODE_OF;
}
-static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
- return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+ return is_of_node(fwnode) ?
+ container_of(fwnode, struct device_node, fwnode) : NULL;
}
static inline bool of_have_populated_dt(void)
@@ -219,17 +228,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
-/* flag descriptions */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED 2 /* node has been detached from the device tree */
-#define OF_POPULATED 3 /* device already created for the node */
-#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
-#define OF_BAD_ADDR ((u64)-1)
-
static inline const char *of_node_full_name(const struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -387,7 +388,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
return false;
}
-static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -428,6 +429,11 @@ static inline struct device_node *of_find_node_opts_by_path(const char *path,
return NULL;
}
+static inline struct device_node *of_find_node_by_phandle(phandle handle)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_parent(const struct device_node *node)
{
return NULL;
@@ -824,7 +830,7 @@ static inline int of_property_read_string_index(struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node.
- * Returns true if the property exist false otherwise.
+ * Returns true if the property exists, false otherwise.
*/
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
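Because to_of_node() now checks is_of_node() internally, a caller holding an arbitrary struct fwnode_handle * can convert defensively; a sketch:

    struct device_node *np = to_of_node(fwnode);

    if (!np)
        return -ENODEV;    /* fwnode is not a DT node */
    /* ... safe to walk np's properties ... */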
diff --git a/kernel/include/linux/of_address.h b/kernel/include/linux/of_address.h
index d88e81be6..507daad0b 100644
--- a/kernel/include/linux/of_address.h
+++ b/kernel/include/linux/of_address.h
@@ -57,6 +57,13 @@ extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
u64 *paddr, u64 *size);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
+
+static inline u64 of_translate_address(struct device_node *np,
+ const __be32 *addr)
+{
+ return OF_BAD_ADDR;
+}
+
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
const struct of_device_id *matches,
diff --git a/kernel/include/linux/of_device.h b/kernel/include/linux/of_device.h
index 22801b10c..cc7dd687a 100644
--- a/kernel/include/linux/of_device.h
+++ b/kernel/include/linux/of_device.h
@@ -33,6 +33,8 @@ extern int of_device_add(struct platform_device *pdev);
extern int of_device_register(struct platform_device *ofdev);
extern void of_device_unregister(struct platform_device *ofdev);
+extern const void *of_device_get_match_data(const struct device *dev);
+
extern ssize_t of_device_get_modalias(struct device *dev,
char *str, ssize_t len);
@@ -57,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
return 0;
}
@@ -65,6 +67,11 @@ static inline int of_driver_match_device(struct device *dev,
static inline void of_device_uevent(struct device *dev,
struct kobj_uevent_env *env) { }
+static inline const void *of_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline int of_device_get_modalias(struct device *dev,
char *str, ssize_t len)
{
diff --git a/kernel/include/linux/of_dma.h b/kernel/include/linux/of_dma.h
index 56bc026c1..b90d8ec57 100644
--- a/kernel/include/linux/of_dma.h
+++ b/kernel/include/linux/of_dma.h
@@ -23,6 +23,9 @@ struct of_dma {
struct device_node *of_node;
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *);
+ void *(*of_dma_route_allocate)
+ (struct of_phandle_args *, struct of_dma *);
+ struct dma_router *dma_router;
void *of_dma_data;
};
@@ -31,18 +34,26 @@ struct of_dma_filter_info {
dma_filter_fn filter_fn;
};
-#ifdef CONFIG_OF
+#ifdef CONFIG_DMA_OF
extern int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
void *data);
extern void of_dma_controller_free(struct device_node *np);
+
+extern int of_dma_router_register(struct device_node *np,
+ void *(*of_dma_route_allocate)
+ (struct of_phandle_args *, struct of_dma *),
+ struct dma_router *dma_router);
+#define of_dma_router_free of_dma_controller_free
+
extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name);
extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
+
#else
static inline int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
@@ -56,10 +67,20 @@ static inline void of_dma_controller_free(struct device_node *np)
{
}
+static inline int of_dma_router_register(struct device_node *np,
+ void *(*of_dma_route_allocate)
+ (struct of_phandle_args *, struct of_dma *),
+ struct dma_router *dma_router)
+{
+ return -ENODEV;
+}
+
+#define of_dma_router_free of_dma_controller_free
+
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
diff --git a/kernel/include/linux/of_fdt.h b/kernel/include/linux/of_fdt.h
index 587ee5079..df9ef3801 100644
--- a/kernel/include/linux/of_fdt.h
+++ b/kernel/include/linux/of_fdt.h
@@ -37,7 +37,7 @@ extern bool of_fdt_is_big_endian(const void *blob,
unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
const char *const *compat);
-extern void of_fdt_unflatten_tree(unsigned long *blob,
+extern void of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node **mynodes);
/* TBD: Temporary export of fdt globals - remove when code fully merged */
@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/kernel/include/linux/of_gpio.h b/kernel/include/linux/of_gpio.h
index 69dbe312b..87d6d1632 100644
--- a/kernel/include/linux/of_gpio.h
+++ b/kernel/include/linux/of_gpio.h
@@ -29,6 +29,7 @@ struct device_node;
*/
enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
+ OF_GPIO_SINGLE_ENDED = 0x2,
};
#ifdef CONFIG_OF_GPIO
@@ -54,7 +55,7 @@ extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-extern void of_gpiochip_add(struct gpio_chip *gc);
+extern int of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
extern int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
@@ -76,7 +77,7 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
return -ENOSYS;
}
-static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
#endif /* CONFIG_OF_GPIO */
diff --git a/kernel/include/linux/of_graph.h b/kernel/include/linux/of_graph.h
index 7bc92e050..f8bcd0e21 100644
--- a/kernel/include/linux/of_graph.h
+++ b/kernel/include/linux/of_graph.h
@@ -45,6 +45,8 @@ int of_graph_parse_endpoint(const struct device_node *node,
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
+struct device_node *of_graph_get_endpoint_by_regs(
+ const struct device_node *parent, int port_reg, int reg);
struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node);
struct device_node *of_graph_get_remote_port(const struct device_node *node);
@@ -69,6 +71,12 @@ static inline struct device_node *of_graph_get_next_endpoint(
return NULL;
}
+static inline struct device_node *of_graph_get_endpoint_by_regs(
+ const struct device_node *parent, int port_reg, int reg)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node)
{
diff --git a/kernel/include/linux/of_irq.h b/kernel/include/linux/of_irq.h
index d884929a7..1e0deb8e8 100644
--- a/kernel/include/linux/of_irq.h
+++ b/kernel/include/linux/of_irq.h
@@ -46,6 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern struct irq_domain *of_msi_get_domain(struct device *dev,
+ struct device_node *np,
+ enum irq_domain_bus_token token);
+extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
+ u32 rid);
+extern void of_msi_configure(struct device *dev, struct device_node *np);
+u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -64,28 +72,46 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
{
return 0;
}
+static inline void *of_irq_find_parent(struct device_node *child)
+{
+ return NULL;
+}
+
+static inline struct irq_domain *of_msi_get_domain(struct device *dev,
+ struct device_node *np,
+ enum irq_domain_bus_token token)
+{
+ return NULL;
+}
+static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
+ u32 rid)
+{
+ return NULL;
+}
+static inline void of_msi_configure(struct device *dev, struct device_node *np)
+{
+}
+static inline u32 of_msi_map_rid(struct device *dev,
+ struct device_node *msi_np, u32 rid_in)
+{
+ return rid_in;
+}
#endif
-#if defined(CONFIG_OF)
+#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
/*
* irq_of_parse_and_map() is used by all OF-enabled platforms, but SPARC
* implements it differently. However, the prototype is the same for all,
* so declare it here regardless of the CONFIG_OF_IRQ setting.
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
-#else /* !CONFIG_OF */
+#else /* !CONFIG_OF && !CONFIG_SPARC */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
int index)
{
return 0;
}
-
-static inline void *of_irq_find_parent(struct device_node *child)
-{
- return NULL;
-}
#endif /* !CONFIG_OF */
#endif /* __OF_IRQ_H */
diff --git a/kernel/include/linux/of_pci.h b/kernel/include/linux/of_pci.h
index 29fd3fe1c..2c51ee78b 100644
--- a/kernel/include/linux/of_pci.h
+++ b/kernel/include/linux/of_pci.h
@@ -16,7 +16,7 @@ int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
-void of_pci_dma_configure(struct pci_dev *pci_dev);
+void of_pci_check_probe_only(void);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -52,7 +52,7 @@ of_get_pci_domain_nr(struct device_node *node)
return -1;
}
-static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { }
+static inline void of_pci_check_probe_only(void) { }
#endif
#if defined(CONFIG_OF_ADDRESS)
diff --git a/kernel/include/linux/of_platform.h b/kernel/include/linux/of_platform.h
index 611a69114..956a1006a 100644
--- a/kernel/include/linux/of_platform.h
+++ b/kernel/include/linux/of_platform.h
@@ -72,6 +72,9 @@ extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
struct device *parent);
+extern int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent);
extern void of_platform_depopulate(struct device *parent);
#else
static inline int of_platform_populate(struct device_node *root,
@@ -81,6 +84,12 @@ static inline int of_platform_populate(struct device_node *root,
{
return -ENODEV;
}
+static inline int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ return -ENODEV;
+}
static inline void of_platform_depopulate(struct device *parent) { }
#endif
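A sketch of the new helper from a board init hook; passing NULL for root and lookup is assumed to populate from the root node using the built-in default match table, mirroring the common of_platform_populate() call:

    static int __init board_init(void)
    {
        return of_platform_default_populate(NULL, NULL, NULL);
    }
    arch_initcall(board_init);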
diff --git a/kernel/include/linux/oid_registry.h b/kernel/include/linux/oid_registry.h
index c2bbf672b..d2fa9ca42 100644
--- a/kernel/include/linux/oid_registry.h
+++ b/kernel/include/linux/oid_registry.h
@@ -41,7 +41,7 @@ enum OID {
OID_signed_data, /* 1.2.840.113549.1.7.2 */
/* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
OID_email_address, /* 1.2.840.113549.1.9.1 */
- OID_content_type, /* 1.2.840.113549.1.9.3 */
+ OID_contentType, /* 1.2.840.113549.1.9.3 */
OID_messageDigest, /* 1.2.840.113549.1.9.4 */
OID_signingTime, /* 1.2.840.113549.1.9.5 */
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
@@ -54,6 +54,8 @@ enum OID {
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
+ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
+ OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */
OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
@@ -61,6 +63,9 @@ enum OID {
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
+ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
+ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
+ OID_sha224, /* 2.16.840.1.101.3.4.2.4 */
/* Distinguished Name attribute IDs [RFC 2256] */
OID_commonName, /* 2.5.4.3 */
diff --git a/kernel/include/linux/omap-dma.h b/kernel/include/linux/omap-dma.h
index e5a70132a..88fa8af2b 100644
--- a/kernel/include/linux/omap-dma.h
+++ b/kernel/include/linux/omap-dma.h
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
-#define INT_DMA_LCD 25
+#define INT_DMA_LCD (NR_IRQS_LEGACY + 25)
#define OMAP1_DMA_TOUT_IRQ (1 << 0)
#define OMAP_DMA_DROP_IRQ (1 << 1)
diff --git a/kernel/include/linux/once.h b/kernel/include/linux/once.h
new file mode 100644
index 000000000..285f12cb4
--- /dev/null
+++ b/kernel/include/linux/once.h
@@ -0,0 +1,57 @@
+#ifndef _LINUX_ONCE_H
+#define _LINUX_ONCE_H
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+
+bool __do_once_start(bool *done, unsigned long *flags);
+void __do_once_done(bool *done, struct static_key *once_key,
+ unsigned long *flags);
+
+/* Call a function exactly once. The idea of DO_ONCE() is to perform
+ * a function call such as initialization of random seeds, etc., only
+ * once, where DO_ONCE() can live in the fast-path. After @func has
+ * been called with the passed arguments, the static key will patch
+ * out the condition into a nop. DO_ONCE() guarantees type safety of
+ * arguments!
+ *
+ * Note that the following is not equivalent ...
+ *
+ * DO_ONCE(func, arg);
+ * DO_ONCE(func, arg);
+ *
+ * ... to this version:
+ *
+ * void foo(void)
+ * {
+ * DO_ONCE(func, arg);
+ * }
+ *
+ * foo();
+ * foo();
+ *
+ * If the one-time invocation can be triggered from multiple places,
+ * define a common helper function, so that only a single static key
+ * is placed there!
+ */
+#define DO_ONCE(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool ___done = false; \
+ static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
+ if (static_key_true(&___once_key)) { \
+ unsigned long ___flags; \
+ ___ret = __do_once_start(&___done, &___flags); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_done(&___done, &___once_key, \
+ &___flags); \
+ } \
+ } \
+ ___ret; \
+ })
+
+#define get_random_once(buf, nbytes) \
+ DO_ONCE(get_random_bytes, (buf), (nbytes))
+
+#endif /* _LINUX_ONCE_H */
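A sketch of the intended fast-path use, seeding a secret on first call; jhash and the surrounding names are illustrative:

    static u32 hash_secret __read_mostly;

    static u32 my_hash(const void *data, u32 len)
    {
        /* real work happens only on the very first call */
        get_random_once(&hash_secret, sizeof(hash_secret));
        return jhash(data, len, hash_secret);
    }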
diff --git a/kernel/include/linux/oom.h b/kernel/include/linux/oom.h
index 44b2f6f7b..03e625732 100644
--- a/kernel/include/linux/oom.h
+++ b/kernel/include/linux/oom.h
@@ -13,6 +13,27 @@ struct mem_cgroup;
struct task_struct;
/*
+ * Details of the page allocation that triggered the oom killer that are used to
+ * determine what should be killed.
+ */
+struct oom_control {
+ /* Used to determine cpuset */
+ struct zonelist *zonelist;
+
+ /* Used to determine mempolicy */
+ nodemask_t *nodemask;
+
+ /* Used to determine cpuset and node locality requirement */
+ const gfp_t gfp_mask;
+
+ /*
+ * order == -1 means the oom kill was triggered by sysrq; any other
+ * value is used only for display purposes.
+ */
+ const int order;
+};
+
+/*
* Types of limitations to the nodes from which allocations may occur
*/
enum oom_constraint {
@@ -32,6 +53,8 @@ enum oom_scan_t {
/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
+extern struct mutex oom_lock;
+
static inline void set_current_oom_origin(void)
{
current->signal->oom_flags |= OOM_FLAG_ORIGIN;
@@ -47,9 +70,7 @@ static inline bool oom_task_origin(const struct task_struct *p)
return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}
-extern void mark_tsk_oom_victim(struct task_struct *tsk);
-
-extern void unmark_oom_victim(void);
+extern void mark_oom_victim(struct task_struct *tsk);
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
@@ -57,24 +78,21 @@ extern unsigned long oom_badness(struct task_struct *p,
extern int oom_kills_count(void);
extern void note_oom_kill(void);
-extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
unsigned int points, unsigned long totalpages,
- struct mem_cgroup *memcg, nodemask_t *nodemask,
- const char *message);
+ struct mem_cgroup *memcg, const char *message);
-extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
-
-extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask,
+extern void check_panic_on_oom(struct oom_control *oc,
+ enum oom_constraint constraint,
struct mem_cgroup *memcg);
-extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
- unsigned long totalpages, const nodemask_t *nodemask,
- bool force_kill);
+extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
+ struct task_struct *task, unsigned long totalpages);
+
+extern bool out_of_memory(struct oom_control *oc);
+
+extern void exit_oom_victim(void);
-extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
- int order, nodemask_t *mask, bool force_kill);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
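With this interface, callers bundle the allocation context into one structure instead of passing loose arguments; the const members force designated initialization. Roughly, under the new oom_lock:

    struct oom_control oc = {
        .zonelist = zonelist,
        .nodemask = nodemask,
        .gfp_mask = gfp_mask,
        .order = order,
    };

    if (mutex_trylock(&oom_lock)) {
        out_of_memory(&oc);
        mutex_unlock(&oom_lock);
    }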
diff --git a/kernel/include/linux/osq_lock.h b/kernel/include/linux/osq_lock.h
index 3a6490e81..703ea5c30 100644
--- a/kernel/include/linux/osq_lock.h
+++ b/kernel/include/linux/osq_lock.h
@@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
+static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
+{
+ return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
+}
+
#endif
diff --git a/kernel/include/linux/page-flags.h b/kernel/include/linux/page-flags.h
index f34e040b3..bb53c7b86 100644
--- a/kernel/include/linux/page-flags.h
+++ b/kernel/include/linux/page-flags.h
@@ -86,12 +86,7 @@ enum pageflags {
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
PG_writeback, /* Page is under writeback */
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
PG_head, /* A head page */
- PG_tail, /* A tail page */
-#else
- PG_compound, /* A compound page */
-#endif
PG_swapcache, /* Swap page: swp_entry_t in private */
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
@@ -109,6 +104,10 @@ enum pageflags {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
PG_compound_lock,
#endif
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+ PG_young,
+ PG_idle,
+#endif
__NR_PAGEFLAGS,
/* Filesystems */
@@ -252,7 +251,7 @@ PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
* Must use a macro here due to header dependency issues. page_zone() is not
* available at this point.
*/
-#define PageHighMem(__p) is_highmem(page_zone(__p))
+#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif
@@ -289,6 +288,13 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+TESTPAGEFLAG(Young, young)
+SETPAGEFLAG(Young, young)
+TESTCLEARFLAG(Young, young)
+PAGEFLAG(Idle, idle)
+#endif
+
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
@@ -387,85 +393,46 @@ static inline void set_page_writeback_keepwrite(struct page *page)
test_set_page_writeback_keepwrite(page);
}
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
-/*
- * System with lots of page flags available. This allows separate
- * flags for PageHead() and PageTail() checks of compound pages so that bit
- * tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths except arch/powerpc/mm/init_64.c
- * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
- * and avoid handling those in real mode.
- */
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
-__PAGEFLAG(Tail, tail)
-
-static inline int PageCompound(struct page *page)
-{
- return page->flags & ((1L << PG_head) | (1L << PG_tail));
-}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void ClearPageCompound(struct page *page)
+static inline int PageTail(struct page *page)
{
- BUG_ON(!PageHead(page));
- ClearPageHead(page);
+ return READ_ONCE(page->compound_head) & 1;
}
-#endif
-
-#define PG_head_mask ((1L << PG_head))
-
-#else
-/*
- * Reduce page flag use as much as possible by overlapping
- * compound page flags with the flags used for page cache pages. Possible
- * because PageCompound is always set for compound pages and not for
- * pages on the LRU and/or pagecache.
- */
-TESTPAGEFLAG(Compound, compound)
-__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
-
-/*
- * PG_reclaim is used in combination with PG_compound to mark the
- * head and tail of a compound page. This saves one page flag
- * but makes it impossible to use compound pages for the page cache.
- * The PG_reclaim bit would have to be used for reclaim or readahead
- * if compound pages enter the page cache.
- *
- * PG_compound & PG_reclaim => Tail page
- * PG_compound & ~PG_reclaim => Head page
- */
-#define PG_head_mask ((1L << PG_compound))
-#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
-static inline int PageHead(struct page *page)
+static inline void set_compound_head(struct page *page, struct page *head)
{
- return ((page->flags & PG_head_tail_mask) == PG_head_mask);
+ WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}
-static inline int PageTail(struct page *page)
+static inline void clear_compound_head(struct page *page)
{
- return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+ WRITE_ONCE(page->compound_head, 0);
}
-static inline void __SetPageTail(struct page *page)
+static inline struct page *compound_head(struct page *page)
{
- page->flags |= PG_head_tail_mask;
+ unsigned long head = READ_ONCE(page->compound_head);
+
+ if (unlikely(head & 1))
+ return (struct page *) (head - 1);
+ return page;
}
-static inline void __ClearPageTail(struct page *page)
+static inline int PageCompound(struct page *page)
{
- page->flags &= ~PG_head_tail_mask;
-}
+ return PageHead(page) || PageTail(page);
+}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
- BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
- clear_bit(PG_compound, &page->flags);
+ BUG_ON(!PageHead(page));
+ ClearPageHead(page);
}
#endif
-#endif /* !PAGEFLAGS_EXTENDED */
+#define PG_head_mask ((1L << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
@@ -631,15 +598,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
1 << PG_private | 1 << PG_private_2 | \
1 << PG_writeback | 1 << PG_reserved | \
1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
- 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+ 1 << PG_unevictable | __PG_MLOCKED | \
__PG_COMPOUND_LOCK)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
+ * alloc-free cycle to prevent from reusing the page.
*/
-#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP \
+ (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE \
(1 << PG_private | 1 << PG_private_2)
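The rewrite above replaces the PG_tail/PG_compound flag games with a tagged pointer: a tail page stores its head's address in page->compound_head with bit 0 set, a bit an aligned struct page pointer never has, so PageTail() is one load plus a bit test. A worked sketch of the encoding (head and tail assumed):

    /* link, as set_compound_head() does */
    tail->compound_head = (unsigned long)head + 1;

    /* decode, as compound_head() does */
    unsigned long v = READ_ONCE(tail->compound_head);
    struct page *h = (v & 1) ? (struct page *)(v - 1) : tail;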
diff --git a/kernel/include/linux/page-isolation.h b/kernel/include/linux/page-isolation.h
index 2dc1e1697..047d64706 100644
--- a/kernel/include/linux/page-isolation.h
+++ b/kernel/include/linux/page-isolation.h
@@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages);
-/*
- * Internal functions. Changes pageblock's migrate type.
- */
-int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
-void unset_migratetype_isolate(struct page *page, unsigned migratetype);
struct page *alloc_migrate_target(struct page *page, unsigned long private,
int **resultp);
diff --git a/kernel/include/linux/page_counter.h b/kernel/include/linux/page_counter.h
index 17fa4f8de..7e62920a3 100644
--- a/kernel/include/linux/page_counter.h
+++ b/kernel/include/linux/page_counter.h
@@ -36,9 +36,9 @@ static inline unsigned long page_counter_read(struct page_counter *counter)
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_try_charge(struct page_counter *counter,
- unsigned long nr_pages,
- struct page_counter **fail);
+bool page_counter_try_charge(struct page_counter *counter,
+ unsigned long nr_pages,
+ struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
int page_counter_limit(struct page_counter *counter, unsigned long limit);
int page_counter_memparse(const char *buf, const char *max,
diff --git a/kernel/include/linux/page_ext.h b/kernel/include/linux/page_ext.h
index c42981cd9..17f118a82 100644
--- a/kernel/include/linux/page_ext.h
+++ b/kernel/include/linux/page_ext.h
@@ -26,6 +26,10 @@ enum page_ext_flags {
PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+ PAGE_EXT_YOUNG,
+ PAGE_EXT_IDLE,
+#endif
};
/*
diff --git a/kernel/include/linux/page_idle.h b/kernel/include/linux/page_idle.h
new file mode 100644
index 000000000..bf268fa92
--- /dev/null
+++ b/kernel/include/linux/page_idle.h
@@ -0,0 +1,110 @@
+#ifndef _LINUX_MM_PAGE_IDLE_H
+#define _LINUX_MM_PAGE_IDLE_H
+
+#include <linux/bitops.h>
+#include <linux/page-flags.h>
+#include <linux/page_ext.h>
+
+#ifdef CONFIG_IDLE_PAGE_TRACKING
+
+#ifdef CONFIG_64BIT
+static inline bool page_is_young(struct page *page)
+{
+ return PageYoung(page);
+}
+
+static inline void set_page_young(struct page *page)
+{
+ SetPageYoung(page);
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return TestClearPageYoung(page);
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return PageIdle(page);
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ SetPageIdle(page);
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ ClearPageIdle(page);
+}
+#else /* !CONFIG_64BIT */
+/*
+ * If there is not enough space to store Idle and Young bits in page flags, use
+ * page ext flags instead.
+ */
+extern struct page_ext_operations page_idle_ops;
+
+static inline bool page_is_young(struct page *page)
+{
+ return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+}
+
+static inline void set_page_young(struct page *page)
+{
+ set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return test_and_clear_bit(PAGE_EXT_YOUNG,
+ &lookup_page_ext(page)->flags);
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+#endif /* CONFIG_64BIT */
+
+#else /* !CONFIG_IDLE_PAGE_TRACKING */
+
+static inline bool page_is_young(struct page *page)
+{
+ return false;
+}
+
+static inline void set_page_young(struct page *page)
+{
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return false;
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return false;
+}
+
+static inline void set_page_idle(struct page *page)
+{
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+}
+
+#endif /* CONFIG_IDLE_PAGE_TRACKING */
+
+#endif /* _LINUX_MM_PAGE_IDLE_H */
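A sketch of the tracking cycle these accessors support, independent of word size: mark a page idle, let the workload run, then infer access from which bits survived (the scanning that transfers referenced bits into the young bit lives in the MM core, not here):

    bool accessed;

    set_page_idle(page);
    /* ... workload runs; rmap walks call test_and_clear_page_young() ... */
    accessed = !page_is_idle(page) || page_is_young(page);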
diff --git a/kernel/include/linux/page_owner.h b/kernel/include/linux/page_owner.h
index b48c3471c..cacaabea8 100644
--- a/kernel/include/linux/page_owner.h
+++ b/kernel/include/linux/page_owner.h
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
+extern gfp_t __get_page_owner_gfp(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
__set_page_owner(page, order, gfp_mask);
}
+
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+ if (likely(!page_owner_inited))
+ return 0;
+
+ return __get_page_owner_gfp(page);
+}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
{
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+ return 0;
+}
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/kernel/include/linux/pageblock-flags.h b/kernel/include/linux/pageblock-flags.h
index 2baeee12f..e942558b3 100644
--- a/kernel/include/linux/pageblock-flags.h
+++ b/kernel/include/linux/pageblock-flags.h
@@ -44,7 +44,7 @@ enum pageblock_bits {
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Huge page sizes are variable */
-extern int pageblock_order;
+extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
diff --git a/kernel/include/linux/pagemap.h b/kernel/include/linux/pagemap.h
index 4b3736f70..26eabf5ec 100644
--- a/kernel/include/linux/pagemap.h
+++ b/kernel/include/linux/pagemap.h
@@ -69,6 +69,13 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
+/* Restricts the given gfp_mask to what the mapping allows. */
+static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+ gfp_t gfp_mask)
+{
+ return mapping_gfp_mask(mapping) & gfp_mask;
+}
+
/*
* This is non-atomic. Only to be used before the mapping is activated.
* Probably needs a barrier...
@@ -651,7 +658,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+extern void __delete_from_page_cache(struct page *page, void *shadow,
+ struct mem_cgroup *memcg);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
@@ -670,4 +678,10 @@ static inline int add_to_page_cache(struct page *page,
return error;
}
+static inline unsigned long dir_pages(struct inode *inode)
+{
+ return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+}
+
#endif /* _LINUX_PAGEMAP_H */
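mapping_gfp_constraint() exists so callers stop open-coding the mask AND; a typical pagecache allocation would clamp its flags like this (sketch):

    page = __page_cache_alloc(mapping_gfp_constraint(mapping, GFP_KERNEL));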
diff --git a/kernel/include/linux/parport.h b/kernel/include/linux/parport.h
index c22f12547..58e3c64c6 100644
--- a/kernel/include/linux/parport.h
+++ b/kernel/include/linux/parport.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/irqreturn.h>
#include <linux/semaphore.h>
+#include <linux/device.h>
#include <asm/ptrace.h>
#include <uapi/linux/parport.h>
@@ -145,6 +146,8 @@ struct pardevice {
unsigned int flags;
struct pardevice *next;
struct pardevice *prev;
+ struct device dev;
+ bool devmodel;
struct parport_state *state; /* saved status over preemption */
wait_queue_head_t wait_q;
unsigned long int time;
@@ -156,6 +159,8 @@ struct pardevice {
void * sysctl_table;
};
+#define to_pardevice(n) container_of(n, struct pardevice, dev)
+
/* IEEE1284 information */
/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
@@ -195,7 +200,7 @@ struct parport {
* This may unfortunately be null if the
* port has a legacy driver.
*/
-
+ struct device bus_dev; /* to link with the bus */
struct parport *physport;
/* If this is a non-default mux
parport, i.e. we're a clone of a real
@@ -245,15 +250,26 @@ struct parport {
struct parport *slaves[3];
};
+#define to_parport_dev(n) container_of(n, struct parport, bus_dev)
+
#define DEFAULT_SPIN_TIME 500 /* us */
struct parport_driver {
const char *name;
void (*attach) (struct parport *);
void (*detach) (struct parport *);
+ void (*match_port)(struct parport *);
+ int (*probe)(struct pardevice *);
+ struct device_driver driver;
+ bool devmodel;
struct list_head list;
};
+#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
+
+int parport_bus_init(void);
+void parport_bus_exit(void);
+
/* parport_register_port registers a new parallel port at the given
address (if one does not already exist) and returns a pointer to it.
This entails claiming the I/O region, IRQ and DMA. NULL is returned
@@ -272,10 +288,20 @@ void parport_announce_port (struct parport *port);
extern void parport_remove_port(struct parport *port);
/* Register a new high-level driver. */
-extern int parport_register_driver (struct parport_driver *);
+
+int __must_check __parport_register_driver(struct parport_driver *,
+ struct module *,
+ const char *mod_name);
+/*
+ * parport_register_driver must be a macro so that KBUILD_MODNAME can
+ * be expanded
+ */
+#define parport_register_driver(driver) \
+ __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
/* Unregister a high-level driver. */
extern void parport_unregister_driver (struct parport_driver *);
+void parport_unregister_driver(struct parport_driver *);
/* If parport_register_driver doesn't fit your needs, perhaps
* parport_find_xxx does. */
@@ -288,6 +314,15 @@ extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
/* Reference counting for ports. */
extern struct parport *parport_get_port (struct parport *);
extern void parport_put_port (struct parport *);
+void parport_del_port(struct parport *);
+
+struct pardev_cb {
+ int (*preempt)(void *);
+ void (*wakeup)(void *);
+ void *private;
+ void (*irq_func)(void *);
+ unsigned int flags;
+};
/* parport_register_device declares that a device is connected to a
port, and tells the kernel all it needs to know.
@@ -301,6 +336,10 @@ struct pardevice *parport_register_device(struct parport *port,
void (*irq_func)(void *),
int flags, void *handle);
+struct pardevice *
+parport_register_dev_model(struct parport *port, const char *name,
+ const struct pardev_cb *par_dev_cb, int cnt);
+
/* parport_unregister unlinks a device from the chain. */
extern void parport_unregister_device(struct pardevice *dev);
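A sketch of the new device-model registration path, which replaces the long parport_register_device() argument list with a pardev_cb bundle; the callbacks are placeholders assumed to be defined by the driver:

    struct pardev_cb cb = {
        .preempt = my_preempt,
        .wakeup = my_wakeup,
        .irq_func = my_irq,
        .private = priv,
    };
    struct pardevice *dev;

    dev = parport_register_dev_model(port, "mydev", &cb, 0);
    if (!dev)
        return -ENODEV;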
diff --git a/kernel/include/linux/pata_arasan_cf_data.h b/kernel/include/linux/pata_arasan_cf_data.h
index 3cc21c9cc..9fade5dd2 100644
--- a/kernel/include/linux/pata_arasan_cf_data.h
+++ b/kernel/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller platform data header file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/kernel/include/linux/pci-acpi.h b/kernel/include/linux/pci-acpi.h
index a965efa52..89ab0572d 100644
--- a/kernel/include/linux/pci-acpi.h
+++ b/kernel/include/linux/pci-acpi.h
@@ -52,6 +52,30 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
return ACPI_HANDLE(dev);
}
+struct acpi_pci_root;
+struct acpi_pci_root_ops;
+
+struct acpi_pci_root_info {
+ struct acpi_pci_root *root;
+ struct acpi_device *bridge;
+ struct acpi_pci_root_ops *ops;
+ struct list_head resources;
+ char name[16];
+};
+
+struct acpi_pci_root_ops {
+ struct pci_ops *pci_ops;
+ int (*init_info)(struct acpi_pci_root_info *info);
+ void (*release_info)(struct acpi_pci_root_info *info);
+ int (*prepare_resources)(struct acpi_pci_root_info *info);
+};
+
+extern int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info);
+extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
+ struct acpi_pci_root_ops *ops,
+ struct acpi_pci_root_info *info,
+ void *sd);
+
void acpi_pci_add_bus(struct pci_bus *bus);
void acpi_pci_remove_bus(struct pci_bus *bus);
diff --git a/kernel/include/linux/pci-ats.h b/kernel/include/linux/pci-ats.h
index 72031785f..57e0b8250 100644
--- a/kernel/include/linux/pci-ats.h
+++ b/kernel/include/linux/pci-ats.h
@@ -3,55 +3,6 @@
#include <linux/pci.h>
-/* Address Translation Service */
-struct pci_ats {
- int pos; /* capability position */
- int stu; /* Smallest Translation Unit */
- int qdep; /* Invalidate Queue Depth */
- int ref_cnt; /* Physical Function reference count */
- unsigned int is_enabled:1; /* Enable bit is set */
-};
-
-#ifdef CONFIG_PCI_ATS
-
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return dev->ats && dev->ats->is_enabled;
-}
-
-#else /* CONFIG_PCI_ATS */
-
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
- return -ENODEV;
-}
-
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
- return -ENODEV;
-}
-
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return 0;
-}
-
-#endif /* CONFIG_PCI_ATS */
-
#ifdef CONFIG_PCI_PRI
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
diff --git a/kernel/include/linux/pci.h b/kernel/include/linux/pci.h
index 6e935e5ea..6ae25aae8 100644
--- a/kernel/include/linux/pci.h
+++ b/kernel/include/linux/pci.h
@@ -345,6 +345,7 @@ struct pci_dev {
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int ats_enabled:1; /* Address Translation Service */
unsigned int is_managed:1;
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
@@ -357,6 +358,7 @@ struct pci_dev {
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -367,7 +369,6 @@ struct pci_dev {
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
#ifdef CONFIG_PCI_MSI
- struct list_head msi_list;
const struct attribute_group **msi_irq_groups;
#endif
struct pci_vpd *vpd;
@@ -376,7 +377,9 @@ struct pci_dev {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
- struct pci_ats *ats; /* Address Translation Service */
+ u16 ats_cap; /* ATS Capability offset */
+ u8 ats_stu; /* ATS Smallest Translation Unit */
+ atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
#endif
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -409,9 +412,18 @@ struct pci_host_bridge {
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
unsigned int ignore_reset_delay:1; /* for entire hierarchy */
+ /* Resource alignment requirements */
+ resource_size_t (*align_resource)(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
+
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
void (*release_fn)(struct pci_host_bridge *),
void *release_data);
@@ -447,7 +459,8 @@ struct pci_bus {
struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */
- struct list_head slots; /* list of slots on this bus */
+ struct list_head slots; /* list of slots on this bus;
+ protected by pci_slot_mutex */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
struct list_head resources; /* address space routed to this bus */
struct resource busn_res; /* bus numbers routed to this bus */
@@ -739,10 +752,11 @@ struct pci_driver {
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
- PCIE_BUS_TUNE_OFF,
- PCIE_BUS_SAFE,
- PCIE_BUS_PERFORMANCE,
- PCIE_BUS_PEER2PEER,
+ PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
+ PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
+ PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
+ PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
+ PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
};
extern enum pcie_bus_config_types pcie_bus_config;
@@ -781,8 +795,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
-struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
@@ -790,6 +802,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources,
+ struct msi_controller *msi);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
@@ -800,6 +816,11 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
+#ifdef CONFIG_SYSFS
+void pci_dev_assign_slot(struct pci_dev *dev);
+#else
+static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
+#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -808,6 +829,7 @@ void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
+struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
@@ -966,6 +988,23 @@ static inline int pci_is_managed(struct pci_dev *pdev)
return pdev->is_managed;
}
+static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
+{
+ pdev->irq = irq;
+ pdev->irq_managed = 1;
+}
+
+static inline void pci_reset_managed_irq(struct pci_dev *pdev)
+{
+ pdev->irq = 0;
+ pdev->irq_managed = 0;
+}
+
+static inline bool pci_has_managed_irq(struct pci_dev *pdev)
+{
+ return pdev->irq_managed && pdev->irq > 0;
+}
+
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
@@ -982,7 +1021,6 @@ void pci_intx(struct pci_dev *dev, int enable);
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
-void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1164,6 +1202,17 @@ void pci_unregister_driver(struct pci_driver *dev);
module_driver(__pci_driver, pci_register_driver, \
pci_unregister_driver)
+/**
+ * builtin_pci_driver() - Helper macro for registering a PCI driver
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for PCI drivers which do not do anything special in their
+ * init code. This eliminates a lot of boilerplate. Each driver may only
+ * use this macro once, and calling it replaces device_initcall(...)
+ */
+#define builtin_pci_driver(__pci_driver) \
+ builtin_driver(__pci_driver, pci_register_driver)
+
struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
int pci_add_dynid(struct pci_driver *drv,
unsigned int vendor, unsigned int device,
@@ -1199,22 +1248,16 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
dma_pool_create(name, &pdev->dev, size, align, allocation)
#define pci_pool_destroy(pool) dma_pool_destroy(pool)
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define pci_pool_zalloc(pool, flags, handle) \
+ dma_pool_zalloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-enum pci_dma_burst_strategy {
- PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
- strategy_parameter is N/A */
- PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
- byte boundaries */
- PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
- strategy_parameter byte boundaries */
-};
-
struct msix_entry {
u32 vector; /* kernel uses to write allocated vector */
u16 entry; /* driver uses to specify entry, OS writes */
};
+void pci_msi_setup_pci_dev(struct pci_dev *dev);
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
@@ -1307,6 +1350,19 @@ int ht_create_irq(struct pci_dev *dev, int idx);
void ht_destroy_irq(unsigned int irq);
#endif /* CONFIG_HT_IRQ */
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+#endif
+
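A minimal calling sketch, assuming the caller picks the smallest supported page shift; the -ENODEV stubs above make it safe without CONFIG_PCI_ATS:

	static int example_setup_ats(struct pci_dev *pdev)
	{
		int ret;

		ret = pci_enable_ats(pdev, PAGE_SHIFT);	/* ps = smallest page shift */
		if (ret)
			return ret;

		pr_info("ATS invalidate queue depth: %d\n",
			pci_ats_queue_depth(pdev));
		return 0;
	}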
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1434,8 +1490,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
-
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
@@ -1660,6 +1714,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
+int pcibios_alloc_irq(struct pci_dev *dev);
+void pcibios_free_irq(struct pci_dev *dev);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
@@ -1676,6 +1732,7 @@ static inline void pci_mmcfg_late_init(void) { }
int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
@@ -1857,10 +1914,12 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
+struct irq_domain;
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1883,6 +1942,8 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif /* CONFIG_OF */
#ifdef CONFIG_EEH
@@ -1909,4 +1970,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
+
+/**
+ * pci_ari_enabled - query ARI forwarding status
+ * @bus: the PCI bus
+ *
+ * Returns true if ARI forwarding is enabled.
+ */
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+ return bus->self && bus->self->ari_enabled;
+}
#endif /* LINUX_PCI_H */
diff --git a/kernel/include/linux/pci_ids.h b/kernel/include/linux/pci_ids.h
index 2f7b9a40f..d9ba49ced 100644
--- a/kernel/include/linux/pci_ids.h
+++ b/kernel/include/linux/pci_ids.h
@@ -579,6 +579,7 @@
#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
@@ -2329,6 +2330,17 @@
#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+
+#define PCI_VENDOR_ID_TECHWELL 0x1797
+#define PCI_DEVICE_ID_TECHWELL_6800 0x6800
+#define PCI_DEVICE_ID_TECHWELL_6801 0x6801
+#define PCI_DEVICE_ID_TECHWELL_6804 0x6804
+#define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810
+#define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811
+#define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812
+#define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813
+
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
diff --git a/kernel/include/linux/percpu-defs.h b/kernel/include/linux/percpu-defs.h
index 57f3a1c55..8f16299ca 100644
--- a/kernel/include/linux/percpu-defs.h
+++ b/kernel/include/linux/percpu-defs.h
@@ -488,10 +488,8 @@ do { \
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
/*
- * Operations with implied preemption protection. These operations can be
- * used without worrying about preemption. Note that interrupts may still
- * occur while an operation is in progress and if the interrupt modifies
- * the variable too then RMW actions may not be reliable.
+ * Operations with implied preemption/interrupt protection. These
+ * operations can be used without worrying about preemption and interrupts.
*/
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
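As a sketch of what the implied protection buys (the per-cpu counter is an assumption), callers need no preempt_disable()/local_irq_save() pairing:

	static DEFINE_PER_CPU(unsigned long, example_hits);	/* hypothetical */

	static void example_count_hit(void)
	{
		/* Safe against both preemption and interrupts. */
		this_cpu_inc(example_hits);
	}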
diff --git a/kernel/include/linux/percpu-rwsem.h b/kernel/include/linux/percpu-rwsem.h
index 3e88c9a7d..c2fa3ecb0 100644
--- a/kernel/include/linux/percpu-rwsem.h
+++ b/kernel/include/linux/percpu-rwsem.h
@@ -5,17 +5,19 @@
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
+#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
struct percpu_rw_semaphore {
+ struct rcu_sync rss;
unsigned int __percpu *fast_read_ctr;
- atomic_t write_ctr;
struct rw_semaphore rw_sem;
atomic_t slow_read_ctr;
wait_queue_head_t write_waitq;
};
extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
extern void percpu_up_read(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -31,4 +33,23 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(brw, #brw, &rwsem_key); \
})
+
+#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
+
+static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
+ bool read, unsigned long ip)
+{
+ lock_release(&sem->rw_sem.dep_map, 1, ip);
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ if (!read)
+ sem->rw_sem.owner = NULL;
+#endif
+}
+
+static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
+ bool read, unsigned long ip)
+{
+ lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+}
+
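A usage sketch for the new trylock; the semaphore is assumed to have been set up with percpu_init_rwsem() elsewhere:

	static struct percpu_rw_semaphore example_sem;	/* hypothetical */

	static bool example_try_read_section(void)
	{
		if (!percpu_down_read_trylock(&example_sem))
			return false;	/* writer active or pending */

		/* ... read-side critical section ... */
		percpu_up_read(&example_sem);
		return true;
	}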
#endif
diff --git a/kernel/include/linux/perf/arm_pmu.h b/kernel/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000..bfa673bb8
--- /dev/null
+++ b/kernel/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/include/asm/pmu.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ * interrupt and passed the address of the low level handler,
+ * and can be used to implement any platform specific handling
+ * before or after calling it.
+ */
+struct arm_pmu_platdata {
+ irqreturn_t (*handle_irq)(int irq, void *dev,
+ irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+ }, \
+}
+
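These initializers let a PMU driver start from "everything unsupported" and override only what the hardware implements; a hypothetical event map (event codes made up) might read:

	static const unsigned example_perf_map[PERF_COUNT_HW_MAX] = {
		PERF_MAP_ALL_UNSUPPORTED,
		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,	/* hypothetical code */
		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,	/* hypothetical code */
	};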
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+ /*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+ raw_spinlock_t pmu_lock;
+
+ /*
+ * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+ * already have to allocate this struct per cpu.
+ */
+ struct arm_pmu *percpu_pmu;
+};
+
+struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
+ cpumask_t supported_cpus;
+ int *irq_affinity;
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct perf_event *event);
+ void (*disable)(struct perf_event *event);
+ int (*get_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ void (*clear_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ int (*set_event_filter)(struct hw_perf_event *evt,
+ struct perf_event_attr *attr);
+ u32 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u32 val);
+ void (*start)(struct arm_pmu *);
+ void (*stop)(struct arm_pmu *);
+ void (*reset)(void *);
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
+ int (*map_event)(struct perf_event *event);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+ u64 max_period;
+ struct platform_device *plat_device;
+ struct pmu_hw_events __percpu *hw_events;
+ struct notifier_block hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
+
+struct pmu_probe_info {
+ unsigned int cpuid;
+ unsigned int mask;
+ int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn) \
+{ \
+ .cpuid = (_cpuid), \
+ .mask = (_mask), \
+ .init = (_fn), \
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+ PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+ PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table);
+
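Tying the probe macros together, a hypothetical platform driver could hand arm_pmu_device_probe() a table like this (the init callbacks and OF match table are assumptions):

	static const struct pmu_probe_info example_pmu_probe_table[] = {
		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A7, example_a7_pmu_init),
		XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, example_xscale2_pmu_init),
		{ /* sentinel */ }
	};

	static int example_pmu_probe(struct platform_device *pdev)
	{
		return arm_pmu_device_probe(pdev, example_of_match,
					    example_pmu_probe_table);
	}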
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
diff --git a/kernel/include/linux/perf_event.h b/kernel/include/linux/perf_event.h
index d8a82a89f..f9828a48f 100644
--- a/kernel/include/linux/perf_event.h
+++ b/kernel/include/linux/perf_event.h
@@ -120,7 +120,7 @@ struct hw_perf_event {
};
struct { /* intel_cqm */
int cqm_state;
- int cqm_rmid;
+ u32 cqm_rmid;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
@@ -140,33 +140,67 @@ struct hw_perf_event {
};
#endif
};
+ /*
+ * If the event is a per task event, this will point to the task in
+ * question. See the comment in perf_event_alloc().
+ */
struct task_struct *target;
+
+/*
+ * hw_perf_event::state flags; used to track the PERF_EF_* state.
+ */
+#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH 0x04
+
int state;
+
+ /*
+ * The last observed hardware counter value, updated with a
+ * local64_cmpxchg() such that pmu::read() can be called nested.
+ */
local64_t prev_count;
+
+ /*
+ * The period to start the next sample with.
+ */
u64 sample_period;
+
+ /*
+ * The period we started this sample with.
+ */
u64 last_period;
+
+ /*
+ * However much is left of the current period; note that this is
+ * a full 64bit value and allows for generation of periods longer
+ * than hardware might allow.
+ */
local64_t period_left;
+
+ /*
+ * State for throttling the event, see __perf_event_overflow() and
+ * perf_adjust_freq_unthr_context().
+ */
u64 interrupts_seq;
u64 interrupts;
+ /*
+ * State for freq target events, see __perf_event_overflow() and
+ * perf_adjust_freq_unthr_context().
+ */
u64 freq_time_stamp;
u64 freq_count_stamp;
#endif
};
-/*
- * hw_perf_event::state flags
- */
-#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH 0x04
-
struct perf_event;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
-#define PERF_EVENT_TXN 0x1
+#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
/**
* pmu::capabilities flags
@@ -210,7 +244,19 @@ struct pmu {
/*
* Try and initialize the event for this PMU.
- * Should return -ENOENT when the @event doesn't match this PMU.
+ *
+ * Returns:
+ * -ENOENT -- @event is not for this PMU
+ *
+ * -ENODEV -- @event is for this PMU but PMU not present
+ * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
+ * -EINVAL -- @event is for this PMU but @event is not valid
+ * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
+ * -EACCES -- @event is for this PMU, @event is valid, but no privileges
+ *
+ * 0 -- @event is for this PMU and valid
+ *
+ * Other error return values are allowed.
*/
int (*event_init) (struct perf_event *event);
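A skeletal event_init honouring the -ENOENT convention above (the capability check is a stand-in):

	static int example_pmu_event_init(struct perf_event *event)
	{
		/* Not ours: let the core try the next PMU. */
		if (event->attr.type != event->pmu->type)
			return -ENOENT;

		if (!example_pmu_supports(&event->attr))	/* hypothetical */
			return -EOPNOTSUPP;

		return 0;
	}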
@@ -221,27 +267,61 @@ struct pmu {
void (*event_mapped) (struct perf_event *event); /*optional*/
void (*event_unmapped) (struct perf_event *event); /*optional*/
+ /*
+ * Flags for ->add()/->del()/ ->start()/->stop(). There are
+ * matching hw_perf_event::state flags.
+ */
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
/*
- * Adds/Removes a counter to/from the PMU, can be done inside
- * a transaction, see the ->*_txn() methods.
+ * Adds/Removes a counter to/from the PMU, can be done inside a
+ * transaction, see the ->*_txn() methods.
+ *
+ * The add/del callbacks will reserve all hardware resources required
+ * to service the event, this includes any counter constraint
+ * scheduling etc.
+ *
+ * Called with IRQs disabled and the PMU disabled on the CPU the event
+ * is on.
+ *
+ * ->add() called without PERF_EF_START should result in the same state
+ * as ->add() followed by ->stop().
+ *
+ * ->del() must always stop an event with PERF_EF_UPDATE. If it calls
+ * ->stop() that must deal with already being stopped without
+ * PERF_EF_UPDATE.
*/
int (*add) (struct perf_event *event, int flags);
void (*del) (struct perf_event *event, int flags);
/*
- * Starts/Stops a counter present on the PMU. The PMI handler
- * should stop the counter when perf_event_overflow() returns
- * !0. ->start() will be used to continue.
+ * Starts/Stops a counter present on the PMU.
+ *
+ * The PMI handler should stop the counter when perf_event_overflow()
+ * returns !0. ->start() will be used to continue.
+ *
+ * Also used to change the sample period.
+ *
+ * Called with IRQs disabled and the PMU disabled on the CPU the event
+ * is on -- will be called from NMI context when the PMU generates
+ * NMIs.
+ *
+ * ->stop() with PERF_EF_UPDATE will read the counter and update
+ * period/count values like ->read() would.
+ *
+ * ->start() with PERF_EF_RELOAD will reprogram the counter
+ * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
/*
* Updates the counter value of the event.
+ *
+ * For sampling capable PMUs this will also update the software period
+ * hw_perf_event::period_left field.
*/
void (*read) (struct perf_event *event);
@@ -252,20 +332,26 @@ struct pmu {
*
* Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
+ *
+ * Optional.
*/
- void (*start_txn) (struct pmu *pmu); /* optional */
+ void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
/*
* If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
+ *
+ * Optional.
*/
- int (*commit_txn) (struct pmu *pmu); /* optional */
+ int (*commit_txn) (struct pmu *pmu);
/*
* Will cancel the transaction, assumes ->del() is called
* for each successful ->add() during the transaction.
+ *
+ * Optional.
*/
- void (*cancel_txn) (struct pmu *pmu); /* optional */
+ void (*cancel_txn) (struct pmu *pmu);
/*
* Will return the value for perf_event_mmap_page::index for this event,
@@ -300,6 +386,11 @@ struct pmu {
* Free pmu-private AUX data structures
*/
void (*free_aux) (void *aux); /* optional */
+
+ /*
+ * Filter events for PMU-specific reasons.
+ */
+ int (*filter_match) (struct perf_event *event); /* optional */
};
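Referring back to the new start_txn() flags above, a PMU that only optimizes the add path might record the transaction type and ignore PERF_PMU_TXN_READ; a sketch with an assumed per-cpu flag word:

	static DEFINE_PER_CPU(unsigned int, example_txn_flags);	/* hypothetical */

	static void example_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
	{
		__this_cpu_write(example_txn_flags, txn_flags);
		if (txn_flags == PERF_PMU_TXN_ADD)
			perf_pmu_disable(pmu);	/* batch ->add() calls */
	}

	static int example_pmu_commit_txn(struct pmu *pmu)
	{
		if (__this_cpu_read(example_txn_flags) != PERF_PMU_TXN_ADD)
			return 0;	/* nothing to schedule for a read txn */

		/* ... deferred schedulability test would go here ... */
		perf_pmu_enable(pmu);
		return 0;
	}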
/**
@@ -479,7 +570,7 @@ struct perf_event {
void *overflow_handler_context;
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call *tp_event;
+ struct trace_event_call *tp_event;
struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
@@ -562,8 +653,12 @@ struct perf_cpu_context {
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;
+
+ raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
+ unsigned int hrtimer_active;
+
struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
@@ -602,9 +697,11 @@ struct perf_cgroup {
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
- return container_of(task_css(task, perf_event_cgrp_id),
+ return container_of(task_css_check(task, perf_event_cgrp_id,
+ ctx ? lockdep_is_held(&ctx->lock)
+ : true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
@@ -632,6 +729,8 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
@@ -650,6 +749,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -730,6 +830,22 @@ extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
+extern void perf_event_output(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+extern void
+perf_event_header__init_id(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void
+perf_event__output_id_sample(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *sample);
+
+extern void
+perf_log_lost_samples(struct perf_event *event, u64 lost);
+
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
@@ -794,11 +910,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
extern struct static_key_deferred perf_sched_events;
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+ if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+ return true;
+ return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+ if (perf_sw_migrate_enabled())
+ task->sched_migrated = 1;
+}
+
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
+
+ if (perf_sw_migrate_enabled() && task->sched_migrated) {
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ task->sched_migrated = 0;
+ }
}
static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -921,6 +1059,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
+perf_event_task_migrate(struct task_struct *task) { }
+static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
static inline void
@@ -930,6 +1070,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
+static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -962,6 +1108,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
+static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
diff --git a/kernel/include/linux/phy.h b/kernel/include/linux/phy.h
index 685809835..05fde31b6 100644
--- a/kernel/include/linux/phy.h
+++ b/kernel/include/linux/phy.h
@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
+#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@ struct sk_buff;
* PHYs should register using this structure
*/
struct mii_bus {
+ struct module *owner;
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
@@ -181,6 +183,9 @@ struct mii_bus {
/* PHY addresses to be ignored when probing */
u32 phy_mask;
+ /* PHY addresses for which to ignore the TA/read failure */
+ u32 phy_ignore_ta_mask;
+
/*
* Pointer to an array of interrupts, each PHY's
* interrupt at the index matching its address
@@ -195,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void)
return mdiobus_alloc_size(0);
}
-int mdiobus_register(struct mii_bus *bus);
+int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
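The macro keeps existing callers source-compatible while recording the owning module; a hypothetical MAC driver simply keeps calling:

	static int example_attach_mdio(struct example_priv *priv)	/* hypothetical */
	{
		/* Expands to __mdiobus_register(priv->mii_bus, THIS_MODULE), so the
		 * bus can pin the owner module while PHYs are attached. */
		return mdiobus_register(priv->mii_bus);
	}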
@@ -207,7 +213,9 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
#define PHY_INTERRUPT_DISABLED 0x0
@@ -327,6 +335,7 @@ struct phy_c45_device_ids {
* c45_ids: 802.3-c45 Device Identifiers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
@@ -365,6 +374,7 @@ struct phy_device {
struct phy_c45_device_ids c45_ids;
bool is_c45;
bool is_internal;
+ bool is_pseudo_fixed_link;
bool has_fixups;
bool suspended;
@@ -421,6 +431,8 @@ struct phy_device {
struct net_device *attached_dev;
+ u8 mdix;
+
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -675,6 +687,27 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
+ * is RGMII (all variants)
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
+{
+ return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/**
+ * phy_is_pseudo_fixed_link - Convenience function for testing if this
+ * PHY is the CPU port facing side of an Ethernet switch, or similar.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
+{
+ return phydev->is_pseudo_fixed_link;
+}
+
+/**
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
* @phydev: The phy_device struct
@@ -714,6 +747,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
struct phy_c45_device_ids *c45_ids);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
+void phy_device_remove(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
@@ -766,6 +800,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
void phy_device_free(struct phy_device *phydev);
+int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
diff --git a/kernel/include/linux/phy/phy-sun4i-usb.h b/kernel/include/linux/phy/phy-sun4i-usb.h
new file mode 100644
index 000000000..50aed92ea
--- /dev/null
+++ b/kernel/include/linux/phy/phy-sun4i-usb.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PHY_SUN4I_USB_H_
+#define PHY_SUN4I_USB_H_
+
+#include "phy.h"
+
+/**
+ * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
+ * @phy: reference to a sun4i usb phy
+ * @enabled: whether to enable or disable squelch detect
+ */
+void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);
+
+#endif
diff --git a/kernel/include/linux/phy/phy.h b/kernel/include/linux/phy/phy.h
index a0197fa1b..8cf05e341 100644
--- a/kernel/include/linux/phy/phy.h
+++ b/kernel/include/linux/phy/phy.h
@@ -133,6 +133,8 @@ struct phy *devm_phy_get(struct device *dev, const char *string);
struct phy *devm_phy_optional_get(struct device *dev, const char *string);
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
+struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
+ int index);
void phy_put(struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_get(struct device_node *np, const char *con_id);
@@ -261,6 +263,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
+ struct device_node *np,
+ int index)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void phy_put(struct phy *phy)
{
}
diff --git a/kernel/include/linux/phy_fixed.h b/kernel/include/linux/phy_fixed.h
index fe5732d53..2400d2ea4 100644
--- a/kernel/include/linux/phy_fixed.h
+++ b/kernel/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@ struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
+ struct fixed_phy_status *status,
+ int link_gpio);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int link_gpio,
struct device_node *np);
extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@ extern int fixed_phy_update_state(struct phy_device *phydev,
const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
+ struct fixed_phy_status *status,
+ int link_gpio)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
diff --git a/kernel/include/linux/pinctrl/consumer.h b/kernel/include/linux/pinctrl/consumer.h
index 18eccefea..d7e5d608f 100644
--- a/kernel/include/linux/pinctrl/consumer.h
+++ b/kernel/include/linux/pinctrl/consumer.h
@@ -142,7 +142,7 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
s = pinctrl_lookup_state(p, name);
if (IS_ERR(s)) {
pinctrl_put(p);
- return ERR_PTR(PTR_ERR(s));
+ return ERR_CAST(s);
}
ret = pinctrl_select_state(p, s);
diff --git a/kernel/include/linux/pinctrl/devinfo.h b/kernel/include/linux/pinctrl/devinfo.h
index 281cb91dd..05082e407 100644
--- a/kernel/include/linux/pinctrl/devinfo.h
+++ b/kernel/include/linux/pinctrl/devinfo.h
@@ -24,10 +24,14 @@
* struct dev_pin_info - pin state container for devices
* @p: pinctrl handle for the containing device
* @default_state: the default state for the handle, if found
+ * @init_state: the state at probe time, if found
+ * @sleep_state: the state at suspend time, if found
+ * @idle_state: the state at idle (runtime suspend) time, if found
*/
struct dev_pin_info {
struct pinctrl *p;
struct pinctrl_state *default_state;
+ struct pinctrl_state *init_state;
#ifdef CONFIG_PM
struct pinctrl_state *sleep_state;
struct pinctrl_state *idle_state;
@@ -35,6 +39,7 @@ struct dev_pin_info {
};
extern int pinctrl_bind_pins(struct device *dev);
+extern int pinctrl_init_done(struct device *dev);
#else
@@ -45,5 +50,10 @@ static inline int pinctrl_bind_pins(struct device *dev)
return 0;
}
+static inline int pinctrl_init_done(struct device *dev)
+{
+ return 0;
+}
+
#endif /* CONFIG_PINCTRL */
#endif /* PINCTRL_DEVINFO_H */
diff --git a/kernel/include/linux/pinctrl/pinconf-generic.h b/kernel/include/linux/pinctrl/pinconf-generic.h
index fe65962b2..d921afd5f 100644
--- a/kernel/include/linux/pinctrl/pinconf-generic.h
+++ b/kernel/include/linux/pinctrl/pinconf-generic.h
@@ -20,6 +20,11 @@
/**
* enum pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
+ * weakly drives the last value on a tristate bus, also known as a "bus
+ * holder", "bus keeper" or "repeater". This allows another device on the
+ * bus to change the value by driving the bus high or low and switching to
+ * tristate. The argument is ignored.
* @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a
* transition from say pull-up to pull-down implies that you disable
* pull-up in the process, this setting disables all biasing.
@@ -29,14 +34,6 @@
* if for example some other pin is going to drive the signal connected
* to it for a while. Pins used for input are usually always high
* impedance.
- * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
- * weakly drives the last value on a tristate bus, also known as a "bus
- * holder", "bus keeper" or "repeater". This allows another device on the
- * bus to change the value by driving the bus high or low and switching to
- * tristate. The argument is ignored.
- * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
- * impedance to VDD). If the argument is != 0 pull-up is enabled,
- * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
* @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
* impedance to GROUND). If the argument is != 0 pull-down is enabled,
* if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
@@ -48,10 +45,9 @@
* If the argument is != 0 pull up/down is enabled, if it is 0, the
* configuration is ignored. The proper way to disable it is to use
* @PIN_CONFIG_BIAS_DISABLE.
- * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
- * low, this is the most typical case and is typically achieved with two
- * active transistors on the output. Setting this config will enable
- * push-pull mode, the argument is ignored.
+ * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
+ * impedance to VDD). If the argument is != 0 pull-up is enabled,
+ * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
* which are then pulled up with an external resistor. Setting this
@@ -59,28 +55,26 @@
* @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
* (open emitter). Setting this config will enable open source mode, the
* argument is ignored.
+ * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
+ * low, this is the most typical case and is typically achieved with two
+ * active transistors on the output. Setting this config will enable
+ * push-pull mode, the argument is ignored.
* @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
* passed as argument. The argument is in mA.
+ * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
+ * which means it will wait for signals to settle when reading inputs. The
+ * argument gives the debounce time in usecs. Setting the
+ * argument to zero turns debouncing off.
* @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not
* affect the pin's ability to drive output. 1 enables input, 0 disables
* input.
- * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
- * If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
- * schmitt-trigger mode is disabled.
* @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in
* schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis,
* the threshold value is given on a custom format as argument when
* setting pins to this mode.
- * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
- * which means it will wait for signals to settle when reading inputs. The
- * argument gives the debounce time in usecs. Setting the
- * argument to zero turns debouncing off.
- * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
- * supplies, the argument to this parameter (on a custom format) tells
- * the driver which alternative power source to use.
- * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
- * this parameter (on a custom format) tells the driver which alternative
- * slew rate to use.
+ * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
+ * If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
+ * schmitt-trigger mode is disabled.
* @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
@@ -89,29 +83,35 @@
* 1 to indicate high level, argument 0 to indicate low level. (Please
* see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a
* discussion around this parameter.)
+ * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
+ * supplies, the argument to this parameter (on a custom format) tells
+ * the driver which alternative power source to use.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ * this parameter (on a custom format) tells the driver which alternative
+ * slew rate to use.
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
*/
enum pin_config_param {
+ PIN_CONFIG_BIAS_BUS_HOLD,
PIN_CONFIG_BIAS_DISABLE,
PIN_CONFIG_BIAS_HIGH_IMPEDANCE,
- PIN_CONFIG_BIAS_BUS_HOLD,
- PIN_CONFIG_BIAS_PULL_UP,
PIN_CONFIG_BIAS_PULL_DOWN,
PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
- PIN_CONFIG_DRIVE_PUSH_PULL,
+ PIN_CONFIG_BIAS_PULL_UP,
PIN_CONFIG_DRIVE_OPEN_DRAIN,
PIN_CONFIG_DRIVE_OPEN_SOURCE,
+ PIN_CONFIG_DRIVE_PUSH_PULL,
PIN_CONFIG_DRIVE_STRENGTH,
+ PIN_CONFIG_INPUT_DEBOUNCE,
PIN_CONFIG_INPUT_ENABLE,
- PIN_CONFIG_INPUT_SCHMITT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
- PIN_CONFIG_INPUT_DEBOUNCE,
- PIN_CONFIG_POWER_SOURCE,
- PIN_CONFIG_SLEW_RATE,
+ PIN_CONFIG_INPUT_SCHMITT_ENABLE,
PIN_CONFIG_LOW_POWER_MODE,
PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7FFF,
};
diff --git a/kernel/include/linux/pinctrl/pinctrl-state.h b/kernel/include/linux/pinctrl/pinctrl-state.h
index b5919f8e6..230735193 100644
--- a/kernel/include/linux/pinctrl/pinctrl-state.h
+++ b/kernel/include/linux/pinctrl/pinctrl-state.h
@@ -9,6 +9,13 @@
* hogs to configure muxing and pins at boot, and also as a state
* to go into when returning from sleep and idle in
* .pm_runtime_resume() or ordinary .resume() for example.
+ * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default"
+ * before the driver's probe() function is called. There are some
+ * drivers where that is not appropriate because doing so would
+ * glitch the pins. In those cases you can add an "init" pinctrl
+ * which is the state of the pins before driver probe. After probe,
+ * if the pins are still in "init" state they'll be moved to
+ * "default".
* @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into
* when the pins are idle. This is a state where the system is relaxed
* but not fully sleeping - some power may be on but clocks gated for
@@ -20,5 +27,6 @@
* ordinary .suspend() function.
*/
#define PINCTRL_STATE_DEFAULT "default"
+#define PINCTRL_STATE_INIT "init"
#define PINCTRL_STATE_IDLE "idle"
#define PINCTRL_STATE_SLEEP "sleep"
diff --git a/kernel/include/linux/pinctrl/pinctrl.h b/kernel/include/linux/pinctrl/pinctrl.h
index 66e469751..9ba59fcba 100644
--- a/kernel/include/linux/pinctrl/pinctrl.h
+++ b/kernel/include/linux/pinctrl/pinctrl.h
@@ -127,7 +127,7 @@ struct pinctrl_ops {
*/
struct pinctrl_desc {
const char *name;
- struct pinctrl_pin_desc const *pins;
+ const struct pinctrl_pin_desc *pins;
unsigned int npins;
const struct pinctrl_ops *pctlops;
const struct pinmux_ops *pmxops;
diff --git a/kernel/include/linux/pinctrl/pinmux.h b/kernel/include/linux/pinctrl/pinmux.h
index 511bda9ed..ace60d775 100644
--- a/kernel/include/linux/pinctrl/pinmux.h
+++ b/kernel/include/linux/pinctrl/pinmux.h
@@ -56,6 +56,9 @@ struct pinctrl_dev;
* depending on whether the GPIO is configured as input or output,
* a direction selector function may be implemented as a backing
* to the GPIO controllers that need pin muxing.
+ * @strict: do not allow simultaneous use of the same pin for GPIO and another
+ * function. Check both gpio_owner and mux_owner strictly before approving
+ * the pin request.
*/
struct pinmux_ops {
int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -66,7 +69,7 @@ struct pinmux_ops {
int (*get_function_groups) (struct pinctrl_dev *pctldev,
unsigned selector,
const char * const **groups,
- unsigned * const num_groups);
+ unsigned *num_groups);
int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
unsigned group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
@@ -79,6 +82,7 @@ struct pinmux_ops {
struct pinctrl_gpio_range *range,
unsigned offset,
bool input);
+ bool strict;
};
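A controller that cannot share a pin between GPIO and another function would set the new flag alongside its callbacks (the callbacks themselves are assumed):

	static const struct pinmux_ops example_pmxops = {
		.get_functions_count	= example_get_functions_count,	/* hypothetical */
		.get_function_name	= example_get_function_name,	/* hypothetical */
		.get_function_groups	= example_get_function_groups,	/* hypothetical */
		.set_mux		= example_set_mux,		/* hypothetical */
		.strict			= true,	/* refuse GPIO + function on one pin */
	};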
#endif /* CONFIG_PINMUX */
diff --git a/kernel/include/linux/platform_data/atmel.h b/kernel/include/linux/platform_data/atmel.h
index 4b452c6a2..3c8825b67 100644
--- a/kernel/include/linux/platform_data/atmel.h
+++ b/kernel/include/linux/platform_data/atmel.h
@@ -9,30 +9,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/leds.h>
-#include <linux/spi/spi.h>
-#include <linux/usb/atmel_usba_udc.h>
-#include <linux/atmel-mci.h>
-#include <sound/atmel-ac97c.h>
#include <linux/serial.h>
-#include <linux/platform_data/macb.h>
-
-/*
- * at91: 6 USARTs and one DBGU port (SAM9260)
- * avr32: 4
- */
-#define ATMEL_MAX_UART 7
-
- /* USB Device */
-struct at91_udc_data {
- int vbus_pin; /* high == host powering us */
- u8 vbus_active_low; /* vbus polarity */
- u8 vbus_polled; /* Use polling, not interrupt */
- int pullup_pin; /* active == D+ pulled up */
- u8 pullup_active_low; /* true == pullup_pin is active low */
-};
/* Compact Flash */
struct at91_cf_data {
@@ -46,18 +23,6 @@ struct at91_cf_data {
#define AT91_IDE_SWAP_A0_A2 0x02
};
- /* USB Host */
-#define AT91_MAX_USBH_PORTS 3
-struct at91_usbh_data {
- int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
- int overcurrent_pin[AT91_MAX_USBH_PORTS];
- u8 ports; /* number of ports on root hub */
- u8 overcurrent_supported;
- u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
- u8 overcurrent_status[AT91_MAX_USBH_PORTS];
- u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
-};
-
/* NAND / SmartMedia */
struct atmel_nand_data {
int enable_pin; /* chip enable */
@@ -86,11 +51,6 @@ struct atmel_uart_data {
struct serial_rs485 rs485; /* rs485 settings */
};
-/* CAN */
-struct at91_can_data {
- void (*transceiver_switch)(int on);
-};
-
/* FIXME: this needs a better location, but gets stuff building again */
extern int at91_suspend_entering_slow_clock(void);
diff --git a/kernel/include/linux/i2c/atmel_mxt_ts.h b/kernel/include/linux/platform_data/atmel_mxt_ts.h
index 02bf6ea31..695035a8d 100644
--- a/kernel/include/linux/i2c/atmel_mxt_ts.h
+++ b/kernel/include/linux/platform_data/atmel_mxt_ts.h
@@ -10,16 +10,22 @@
* option) any later version.
*/
-#ifndef __LINUX_ATMEL_MXT_TS_H
-#define __LINUX_ATMEL_MXT_TS_H
+#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
#include <linux/types.h>
+enum mxt_suspend_mode {
+ MXT_SUSPEND_DEEP_SLEEP = 0,
+ MXT_SUSPEND_T9_CTRL = 1,
+};
+
/* The platform data for the Atmel maXTouch touchscreen driver */
struct mxt_platform_data {
unsigned long irqflags;
u8 t19_num_keys;
const unsigned int *t19_keymap;
+ enum mxt_suspend_mode suspend_mode;
};
-#endif /* __LINUX_ATMEL_MXT_TS_H */
+#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/kernel/include/linux/platform_data/clk-ux500.h b/kernel/include/linux/platform_data/clk-ux500.h
index 97baf831e..3af0da1f3 100644
--- a/kernel/include/linux/platform_data/clk-ux500.h
+++ b/kernel/include/linux/platform_data/clk-ux500.h
@@ -10,14 +10,8 @@
#ifndef __CLK_UX500_H
#define __CLK_UX500_H
-void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-
-void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
+void u8500_clk_init(void);
+void u9540_clk_init(void);
+void u8540_clk_init(void);
#endif /* __CLK_UX500_H */
diff --git a/kernel/include/linux/platform_data/dma-dw.h b/kernel/include/linux/platform_data/dma-dw.h
index 87ac14c58..03b6095d3 100644
--- a/kernel/include/linux/platform_data/dma-dw.h
+++ b/kernel/include/linux/platform_data/dma-dw.h
@@ -37,6 +37,7 @@ struct dw_dma_slave {
* @nr_channels: Number of channels supported by hardware (max 8)
* @is_private: The device channels should be marked as private and not for use
* by the general purpose DMA channel allocator.
+ * @is_memcpy: The device channels do support memory-to-memory transfers.
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -47,6 +48,7 @@ struct dw_dma_slave {
struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
+ bool is_memcpy;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
diff --git a/kernel/include/linux/platform_data/dma-hsu.h b/kernel/include/linux/platform_data/dma-hsu.h
index 8a1f6a492..3453fa655 100644
--- a/kernel/include/linux/platform_data/dma-hsu.h
+++ b/kernel/include/linux/platform_data/dma-hsu.h
@@ -18,8 +18,4 @@ struct hsu_dma_slave {
int chan_id;
};
-struct hsu_dma_platform_data {
- unsigned short nr_channels;
-};
-
#endif /* _PLATFORM_DATA_DMA_HSU_H */
diff --git a/kernel/include/linux/platform_data/dma-rcar-audmapp.h b/kernel/include/linux/platform_data/dma-rcar-audmapp.h
deleted file mode 100644
index 471fffebb..000000000
--- a/kernel/include/linux/platform_data/dma-rcar-audmapp.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * This is for Renesas R-Car Audio-DMAC-peri-peri.
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This file is based on the include/linux/sh_dma.h
- *
- * Header for the new SH dmaengine driver
- *
- * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef SH_AUDMAPP_H
-#define SH_AUDMAPP_H
-
-#include <linux/dmaengine.h>
-
-struct audmapp_slave_config {
- int slave_id;
- dma_addr_t src;
- dma_addr_t dst;
- u32 chcr;
-};
-
-struct audmapp_pdata {
- struct audmapp_slave_config *slave;
- int slave_num;
-};
-
-#endif /* SH_AUDMAPP_H */
diff --git a/kernel/include/linux/platform_data/edma.h b/kernel/include/linux/platform_data/edma.h
index bdb2710e2..4299f4ba0 100644
--- a/kernel/include/linux/platform_data/edma.h
+++ b/kernel/include/linux/platform_data/edma.h
@@ -41,51 +41,6 @@
#ifndef EDMA_H_
#define EDMA_H_
-/* PaRAM slots are laid out like this */
-struct edmacc_param {
- u32 opt;
- u32 src;
- u32 a_b_cnt;
- u32 dst;
- u32 src_dst_bidx;
- u32 link_bcntrld;
- u32 src_dst_cidx;
- u32 ccnt;
-} __packed;
-
-/* fields in edmacc_param.opt */
-#define SAM BIT(0)
-#define DAM BIT(1)
-#define SYNCDIM BIT(2)
-#define STATIC BIT(3)
-#define EDMA_FWID (0x07 << 8)
-#define TCCMODE BIT(11)
-#define EDMA_TCC(t) ((t) << 12)
-#define TCINTEN BIT(20)
-#define ITCINTEN BIT(21)
-#define TCCHEN BIT(22)
-#define ITCCHEN BIT(23)
-
-/*ch_status paramater of callback function possible values*/
-#define EDMA_DMA_COMPLETE 1
-#define EDMA_DMA_CC_ERROR 2
-#define EDMA_DMA_TC1_ERROR 3
-#define EDMA_DMA_TC2_ERROR 4
-
-enum address_mode {
- INCR = 0,
- FIFO = 1
-};
-
-enum fifo_width {
- W8BIT = 0,
- W16BIT = 1,
- W32BIT = 2,
- W64BIT = 3,
- W128BIT = 4,
- W256BIT = 5
-};
-
enum dma_event_q {
EVENTQ_0 = 0,
EVENTQ_1 = 1,
@@ -94,64 +49,10 @@ enum dma_event_q {
EVENTQ_DEFAULT = -1
};
-enum sync_dimension {
- ASYNC = 0,
- ABSYNC = 1
-};
-
#define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan))
#define EDMA_CTLR(i) ((i) >> 16)
#define EDMA_CHAN_SLOT(i) ((i) & 0xffff)
-#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
-#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
-#define EDMA_CONT_PARAMS_ANY 1001
-#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
-#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
-
-#define EDMA_MAX_CC 2
-
-/* alloc/free DMA channels and their dedicated parameter RAM slots */
-int edma_alloc_channel(int channel,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data, enum dma_event_q);
-void edma_free_channel(unsigned channel);
-
-/* alloc/free parameter RAM slots */
-int edma_alloc_slot(unsigned ctlr, int slot);
-void edma_free_slot(unsigned slot);
-
-/* alloc/free a set of contiguous parameter RAM slots */
-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
-int edma_free_cont_slots(unsigned slot, int count);
-
-/* calls that operate on part of a parameter RAM slot */
-void edma_set_src(unsigned slot, dma_addr_t src_port,
- enum address_mode mode, enum fifo_width);
-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
- enum address_mode mode, enum fifo_width);
-dma_addr_t edma_get_position(unsigned slot, bool dst);
-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
-void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
- u16 bcnt_rld, enum sync_dimension sync_mode);
-void edma_link(unsigned from, unsigned to);
-void edma_unlink(unsigned from);
-
-/* calls that operate on an entire parameter RAM slot */
-void edma_write_slot(unsigned slot, const struct edmacc_param *params);
-void edma_read_slot(unsigned slot, struct edmacc_param *params);
-
-/* channel control operations */
-int edma_start(unsigned channel);
-void edma_stop(unsigned channel);
-void edma_clean_channel(unsigned channel);
-void edma_clear_event(unsigned channel);
-void edma_pause(unsigned channel);
-void edma_resume(unsigned channel);
-
-void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no);
-
struct edma_rsv_info {
const s16 (*rsv_chans)[2];
@@ -170,10 +71,11 @@ struct edma_soc_info {
/* Resource reservation for other cores */
struct edma_rsv_info *rsv;
+ /* List of channels allocated for memcpy, terminated with -1 */
+ s32 *memcpy_channels;
+
s8 (*queue_priority_mapping)[2];
const s16 (*xbar_chans)[2];
};
-int edma_trigger_channel(unsigned);
-
#endif
diff --git a/kernel/include/linux/platform_data/gpio-ath79.h b/kernel/include/linux/platform_data/gpio-ath79.h
new file mode 100644
index 000000000..88b0db7be
--- /dev/null
+++ b/kernel/include/linux/platform_data/gpio-ath79.h
@@ -0,0 +1,19 @@
+/*
+ * Atheros AR7XXX/AR9XXX GPIO controller platform data
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
+#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
+
+struct ath79_gpio_platform_data {
+ unsigned ngpios;
+ bool oe_inverted;
+};
+
+#endif
diff --git a/kernel/include/linux/platform_data/gpio-em.h b/kernel/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519d2..000000000
--- a/kernel/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __GPIO_EM_H__
-#define __GPIO_EM_H__
-
-struct gpio_em_config {
- unsigned int gpio_base;
- unsigned int irq_base;
- unsigned int number_of_pins;
- const char *pctl_name;
-};
-
-#endif /* __GPIO_EM_H__ */
diff --git a/kernel/include/linux/platform_data/gpio-omap.h b/kernel/include/linux/platform_data/gpio-omap.h
index 5d50b25a7..cb2618147 100644
--- a/kernel/include/linux/platform_data/gpio-omap.h
+++ b/kernel/include/linux/platform_data/gpio-omap.h
@@ -208,9 +208,17 @@ struct omap_gpio_platform_data {
int (*get_context_loss_count)(struct device *dev);
};
+#if IS_BUILTIN(CONFIG_GPIO_OMAP)
extern void omap2_gpio_prepare_for_idle(int off_mode);
extern void omap2_gpio_resume_after_idle(void);
-extern void omap_set_gpio_debounce(int gpio, int enable);
-extern void omap_set_gpio_debounce_time(int gpio, int enable);
+#else
+static inline void omap2_gpio_prepare_for_idle(int off_mode)
+{
+}
+
+static inline void omap2_gpio_resume_after_idle(void)
+{
+}
+#endif
#endif
diff --git a/kernel/include/linux/platform_data/i2c-mux-reg.h b/kernel/include/linux/platform_data/i2c-mux-reg.h
new file mode 100644
index 000000000..c68712aad
--- /dev/null
+++ b/kernel/include/linux/platform_data/i2c-mux-reg.h
@@ -0,0 +1,44 @@
+/*
+ * I2C multiplexer using a single register
+ *
+ * Copyright 2015 Freescale Semiconductor
+ * York Sun <yorksun@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+
+/**
+ * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of value for each channel
+ * @n_values: Number of multiplexer channels
+ * @little_endian: Indicates whether the register is little endian
+ * @write_only: Reading the register is not allowed by hardware
+ * @classes: Optional I2C auto-detection classes
+ * @idle: Value to write to mux when idle
+ * @idle_in_use: Indicates whether the idle value is in use
+ * @reg: Virtual address of the register to switch channel
+ * @reg_size: register size in bytes
+ */
+struct i2c_mux_reg_platform_data {
+ int parent;
+ int base_nr;
+ const unsigned int *values;
+ int n_values;
+ bool little_endian;
+ bool write_only;
+ const unsigned int *classes;
+ u32 idle;
+ bool idle_in_use;
+ void __iomem *reg;
+ resource_size_t reg_size;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */
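For reference, a board file might describe a four-channel, write-only mux like this (all values are made up; the driver maps @reg from a resource):

	static const unsigned int example_mux_values[] = { 0, 1, 2, 3 };

	static struct i2c_mux_reg_platform_data example_mux_pdata = {
		.parent		= 0,	/* parent adapter number */
		.base_nr	= 0,	/* dynamic child bus numbering */
		.values		= example_mux_values,
		.n_values	= ARRAY_SIZE(example_mux_values),
		.write_only	= true,
		.reg_size	= 1,
	};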
diff --git a/kernel/include/linux/platform_data/irq-renesas-irqc.h b/kernel/include/linux/platform_data/irq-renesas-irqc.h
deleted file mode 100644
index 3ae17b3e0..000000000
--- a/kernel/include/linux/platform_data/irq-renesas-irqc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Renesas IRQC Driver
- *
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __IRQ_RENESAS_IRQC_H__
-#define __IRQ_RENESAS_IRQC_H__
-
-struct renesas_irqc_config {
- unsigned int irq_base;
-};
-
-#endif /* __IRQ_RENESAS_IRQC_H__ */
diff --git a/kernel/include/linux/platform_data/itco_wdt.h b/kernel/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 000000000..f16542c77
--- /dev/null
+++ b/kernel/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for the Intel TCO Watchdog
+ */
+
+#ifndef _ITCO_WDT_H_
+#define _ITCO_WDT_H_
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF 2
+#define ICH_RES_MEM_GCS_PMC 0
+
+struct itco_wdt_platform_data {
+ char name[32];
+ unsigned int version;
+};
+
+#endif /* _ITCO_WDT_H_ */
diff --git a/kernel/include/linux/platform_data/keyboard-spear.h b/kernel/include/linux/platform_data/keyboard-spear.h
index 9248e3a7e..5e3ff6539 100644
--- a/kernel/include/linux/platform_data/keyboard-spear.h
+++ b/kernel/include/linux/platform_data/keyboard-spear.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ * Rajeev Kumar <rajeevkumar.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/kernel/include/linux/platform_data/leds-kirkwood-netxbig.h b/kernel/include/linux/platform_data/leds-kirkwood-netxbig.h
index d2be19a51..3c85a735c 100644
--- a/kernel/include/linux/platform_data/leds-kirkwood-netxbig.h
+++ b/kernel/include/linux/platform_data/leds-kirkwood-netxbig.h
@@ -40,6 +40,7 @@ struct netxbig_led {
int mode_addr;
int *mode_val;
int bright_addr;
+ int bright_max;
};
struct netxbig_led_platform_data {
diff --git a/kernel/include/linux/platform_data/leds-kirkwood-ns2.h b/kernel/include/linux/platform_data/leds-kirkwood-ns2.h
index 6a9fed57f..eb8a6860e 100644
--- a/kernel/include/linux/platform_data/leds-kirkwood-ns2.h
+++ b/kernel/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -9,11 +9,25 @@
#ifndef __LEDS_KIRKWOOD_NS2_H
#define __LEDS_KIRKWOOD_NS2_H
+enum ns2_led_modes {
+ NS_V2_LED_OFF,
+ NS_V2_LED_ON,
+ NS_V2_LED_SATA,
+};
+
+struct ns2_led_modval {
+ enum ns2_led_modes mode;
+ int cmd_level;
+ int slow_level;
+};
+
struct ns2_led {
const char *name;
const char *default_trigger;
unsigned cmd;
unsigned slow;
+ int num_modes;
+ struct ns2_led_modval *modval;
};
struct ns2_led_platform_data {
diff --git a/kernel/include/linux/platform_data/lp855x.h b/kernel/include/linux/platform_data/lp855x.h
index 9c7fd1efe..1b2ba24e4 100644
--- a/kernel/include/linux/platform_data/lp855x.h
+++ b/kernel/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@ struct lp855x_rom_data {
Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
- * @supply : regulator that supplies 3V input
*/
struct lp855x_platform_data {
const char *name;
@@ -145,7 +144,6 @@ struct lp855x_platform_data {
unsigned int period_ns;
int size_program;
struct lp855x_rom_data *rom_data;
- struct regulator *supply;
};
#endif
diff --git a/kernel/include/linux/platform_data/macb.h b/kernel/include/linux/platform_data/macb.h
index 044a124bf..21b15f6fe 100644
--- a/kernel/include/linux/platform_data/macb.h
+++ b/kernel/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
#ifndef __MACB_PDATA_H__
#define __MACB_PDATA_H__
+/**
+ * struct macb_platform_data - platform data for MACB Ethernet
+ * @phy_mask: PHY mask passed when registering the MDIO bus
+ *	within the driver
+ * @phy_irq_pin: PHY IRQ
+ * @is_rmii: True if the RMII interface is used
+ * @rev_eth_addr: True to reverse the Ethernet address byte order
+ */
struct macb_platform_data {
u32 phy_mask;
- int phy_irq_pin; /* PHY IRQ */
- u8 is_rmii; /* using RMII interface? */
- u8 rev_eth_addr; /* reverse Ethernet address byte order */
+ int phy_irq_pin;
+ u8 is_rmii;
+ u8 rev_eth_addr;
};
#endif /* __MACB_PDATA_H__ */
diff --git a/kernel/include/linux/mdio-gpio.h b/kernel/include/linux/platform_data/mdio-gpio.h
index 66c30a763..11f00cdab 100644
--- a/kernel/include/linux/mdio-gpio.h
+++ b/kernel/include/linux/platform_data/mdio-gpio.h
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
bool mdio_active_low;
bool mdo_active_low;
- unsigned int phy_mask;
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
int irqs[PHY_MAX_ADDR];
/* reset callback */
int (*reset)(struct mii_bus *bus);
diff --git a/kernel/include/linux/platform_data/mmc-esdhc-imx.h b/kernel/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac..95ccab3f4 100644
--- a/kernel/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/kernel/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,8 +43,8 @@ struct esdhc_platform_data {
enum wp_types wp_type;
enum cd_types cd_type;
int max_bus_width;
- unsigned int f_max;
bool support_vsel;
unsigned int delay_line;
+ unsigned int tuning_step; /* The delay cell steps in tuning procedure */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/kernel/include/linux/platform_data/mtd-nand-pxa3xx.h b/kernel/include/linux/platform_data/mtd-nand-pxa3xx.h
index ac4ea2e64..394d15597 100644
--- a/kernel/include/linux/platform_data/mtd-nand-pxa3xx.h
+++ b/kernel/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -4,30 +4,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-struct pxa3xx_nand_timing {
- unsigned int tCH; /* Enable signal hold time */
- unsigned int tCS; /* Enable signal setup time */
- unsigned int tWH; /* ND_nWE high duration */
- unsigned int tWP; /* ND_nWE pulse time */
- unsigned int tRH; /* ND_nRE high duration */
- unsigned int tRP; /* ND_nRE pulse width */
- unsigned int tR; /* ND_nWE high to ND_nRE low for read */
- unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
- unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
-};
-
-struct pxa3xx_nand_flash {
- char *name;
- uint32_t chip_id;
- unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
- unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
- unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
- unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
- unsigned int num_blocks; /* Number of physical blocks in Flash */
-
- struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
-};
-
/*
 * The current pxa3xx_nand controller has two chip selects, both of
 * which are usable.
@@ -63,9 +39,6 @@ struct pxa3xx_nand_platform_data {
const struct mtd_partition *parts[NUM_CHIP_SELECT];
unsigned int nr_parts[NUM_CHIP_SELECT];
-
- const struct pxa3xx_nand_flash * flash;
- size_t num_flash;
};
extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
diff --git a/kernel/include/linux/platform_data/nfcmrvl.h b/kernel/include/linux/platform_data/nfcmrvl.h
new file mode 100644
index 000000000..a6f9d633f
--- /dev/null
+++ b/kernel/include/linux/platform_data/nfcmrvl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _NFCMRVL_PTF_H_
+#define _NFCMRVL_PTF_H_
+
+struct nfcmrvl_platform_data {
+ /*
+ * Generic
+ */
+
+ /* GPIO that is wired to RESET_N signal */
+ unsigned int reset_n_io;
+ /* Tell whether the transport is muxed into the HCI one */
+ unsigned int hci_muxed;
+
+ /*
+ * UART specific
+ */
+
+ /* Tell whether the UART needs flow control at init */
+ unsigned int flow_control;
+ /* Tell whether the firmware supports break control for power management */
+ unsigned int break_control;
+
+ /*
+ * I2C specific
+ */
+
+ unsigned int irq;
+ unsigned int irq_polarity;
+};
+
+#endif /* _NFCMRVL_PTF_H_ */
diff --git a/kernel/include/linux/platform_data/ntc_thermistor.h b/kernel/include/linux/platform_data/ntc_thermistor.h
index 0a6de4ca4..aed170588 100644
--- a/kernel/include/linux/platform_data/ntc_thermistor.h
+++ b/kernel/include/linux/platform_data/ntc_thermistor.h
@@ -27,6 +27,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWB473,
TYPE_NCPXXWL333,
TYPE_B57330V2103,
+ TYPE_NCPXXWF104,
};
struct ntc_thermistor_platform_data {
diff --git a/kernel/include/linux/input/pixcir_ts.h b/kernel/include/linux/platform_data/pixcir_i2c_ts.h
index 7bae83b7c..646af6f8b 100644
--- a/kernel/include/linux/input/pixcir_ts.h
+++ b/kernel/include/linux/platform_data/pixcir_i2c_ts.h
@@ -57,7 +57,6 @@ struct pixcir_i2c_chip_data {
struct pixcir_ts_platform_data {
int x_max;
int y_max;
- int gpio_attb; /* GPIO connected to ATTB line */
struct pixcir_i2c_chip_data chip;
};
diff --git a/kernel/include/linux/platform_data/s3c-hsotg.h b/kernel/include/linux/platform_data/s3c-hsotg.h
index 3f1cbf95e..3982586ba 100644
--- a/kernel/include/linux/platform_data/s3c-hsotg.h
+++ b/kernel/include/linux/platform_data/s3c-hsotg.h
@@ -17,19 +17,19 @@
struct platform_device;
-enum s3c_hsotg_dmamode {
+enum dwc2_hsotg_dmamode {
 S3C_HSOTG_DMA_NONE, /* do not use DMA at all */
S3C_HSOTG_DMA_ONLY, /* always use DMA */
S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */
};
/**
- * struct s3c_hsotg_plat - platform data for high-speed otg/udc
+ * struct dwc2_hsotg_plat - platform data for high-speed otg/udc
* @dma: Whether to use DMA or not.
* @is_osc: The clock source is an oscillator, not a crystal
*/
-struct s3c_hsotg_plat {
- enum s3c_hsotg_dmamode dma;
+struct dwc2_hsotg_plat {
+ enum dwc2_hsotg_dmamode dma;
unsigned int is_osc:1;
int phy_type;
@@ -37,6 +37,6 @@ struct s3c_hsotg_plat {
int (*phy_exit)(struct platform_device *pdev, int type);
};
-extern void s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd);
+extern void dwc2_hsotg_set_platdata(struct dwc2_hsotg_plat *pd);
#endif /* __LINUX_USB_S3C_HSOTG_H */
diff --git a/kernel/include/linux/platform_data/spi-davinci.h b/kernel/include/linux/platform_data/spi-davinci.h
index 8dc2fa47a..f4edcb03c 100644
--- a/kernel/include/linux/platform_data/spi-davinci.h
+++ b/kernel/include/linux/platform_data/spi-davinci.h
@@ -49,6 +49,7 @@ struct davinci_spi_platform_data {
u8 num_chipselect;
u8 intr_line;
u8 *chip_sel;
+ u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;
};
diff --git a/kernel/include/linux/platform_data/spi-mt65xx.h b/kernel/include/linux/platform_data/spi-mt65xx.h
new file mode 100644
index 000000000..54b044839
--- /dev/null
+++ b/kernel/include/linux/platform_data/spi-mt65xx.h
@@ -0,0 +1,20 @@
+/*
+ * MTK SPI bus driver definitions
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
+#define ____LINUX_PLATFORM_DATA_SPI_MTK_H
+
+/* Board specific platform_data */
+struct mtk_chip_config {
+ u32 tx_mlsb;
+ u32 rx_mlsb;
+};
+#endif
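A minimal usage sketch. It assumes, from the field names alone, that a non-zero value means MSB-first transfer; the semantics beyond that are illustrative.

static struct mtk_chip_config demo_spi_chip_config = {
	.tx_mlsb = 1,	/* assumed: transmit MSB first */
	.rx_mlsb = 1,	/* assumed: receive MSB first */
};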
diff --git a/kernel/include/linux/platform_data/st21nfcb.h b/kernel/include/linux/platform_data/st-nci.h
index b023373d9..f6494b347 100644
--- a/kernel/include/linux/platform_data/st21nfcb.h
+++ b/kernel/include/linux/platform_data/st-nci.h
@@ -1,7 +1,7 @@
/*
- * Driver include for the ST21NFCB NFC chip.
+ * Driver include for ST NCI NFC chip family.
*
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,14 +16,16 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _ST21NFCB_NCI_H_
-#define _ST21NFCB_NCI_H_
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
-#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
+#define ST_NCI_DRIVER_NAME "st_nci"
-struct st21nfcb_nfc_platform_data {
+struct st_nci_nfc_platform_data {
unsigned int gpio_reset;
unsigned int irq_polarity;
+ bool is_ese_present;
+ bool is_uicc_present;
};
-#endif /* _ST21NFCB_NCI_H_ */
+#endif /* _ST_NCI_H_ */
diff --git a/kernel/include/linux/platform_data/usb-rcar-gen2-phy.h b/kernel/include/linux/platform_data/usb-rcar-gen2-phy.h
deleted file mode 100644
index dd3ba46c0..000000000
--- a/kernel/include/linux/platform_data/usb-rcar-gen2-phy.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __USB_RCAR_GEN2_PHY_H
-#define __USB_RCAR_GEN2_PHY_H
-
-#include <linux/types.h>
-
-struct rcar_gen2_phy_platform_data {
- /* USB channel 0 configuration */
- bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */
- /* USB channel 2 configuration */
- bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
-};
-
-#endif
diff --git a/kernel/include/linux/platform_data/video-ep93xx.h b/kernel/include/linux/platform_data/video-ep93xx.h
index 92fc2b223..699ac4109 100644
--- a/kernel/include/linux/platform_data/video-ep93xx.h
+++ b/kernel/include/linux/platform_data/video-ep93xx.h
@@ -2,11 +2,8 @@
#define __VIDEO_EP93XX_H
struct platform_device;
-struct fb_videomode;
struct fb_info;
-#define EP93XXFB_USE_MODEDB 0
-
/* VideoAttributes flags */
#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
@@ -38,12 +35,7 @@ struct fb_info;
EP93XXFB_PIXEL_DATA_ENABLE)
struct ep93xxfb_mach_info {
- unsigned int num_modes;
- const struct fb_videomode *modes;
- const struct fb_videomode *default_mode;
- int bpp;
unsigned int flags;
-
int (*setup)(struct platform_device *pdev);
void (*teardown)(struct platform_device *pdev);
void (*blank)(int blank_mode, struct fb_info *info);
diff --git a/kernel/include/linux/platform_data/video-msm_fb.h b/kernel/include/linux/platform_data/video-msm_fb.h
deleted file mode 100644
index 31449be3e..000000000
--- a/kernel/include/linux/platform_data/video-msm_fb.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Internal shared definitions for various MSM framebuffer parts.
- *
- * Copyright (C) 2007 Google Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_FB_H_
-#define _MSM_FB_H_
-
-#include <linux/device.h>
-
-struct mddi_info;
-
-struct msm_fb_data {
- int xres; /* x resolution in pixels */
- int yres; /* y resolution in pixels */
- int width; /* disply width in mm */
- int height; /* display height in mm */
- unsigned output_format;
-};
-
-struct msmfb_callback {
- void (*func)(struct msmfb_callback *);
-};
-
-enum {
- MSM_MDDI_PMDH_INTERFACE,
- MSM_MDDI_EMDH_INTERFACE,
- MSM_EBI2_INTERFACE,
-};
-
-#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
-
-struct msm_panel_data {
- /* turns off the fb memory */
- int (*suspend)(struct msm_panel_data *);
- /* turns on the fb memory */
- int (*resume)(struct msm_panel_data *);
- /* turns off the panel */
- int (*blank)(struct msm_panel_data *);
- /* turns on the panel */
- int (*unblank)(struct msm_panel_data *);
- void (*wait_vsync)(struct msm_panel_data *);
- void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
- void (*clear_vsync)(struct msm_panel_data *);
- /* from the enum above */
- unsigned interface_type;
- /* data to be passed to the fb driver */
- struct msm_fb_data *fb_data;
-
- /* capabilities supported by the panel */
- uint32_t caps;
-};
-
-struct msm_mddi_client_data {
- void (*suspend)(struct msm_mddi_client_data *);
- void (*resume)(struct msm_mddi_client_data *);
- void (*activate_link)(struct msm_mddi_client_data *);
- void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
- uint32_t reg);
- uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
- void (*auto_hibernate)(struct msm_mddi_client_data *, int);
- /* custom data that needs to be passed from the board file to a
- * particular client */
- void *private_client_data;
- struct resource *fb_resource;
- /* from the list above */
- unsigned interface_type;
-};
-
-struct msm_mddi_platform_data {
- unsigned int clk_rate;
- void (*power_client)(struct msm_mddi_client_data *, int on);
-
- /* fixup the mfr name, product id */
- void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
-
- struct resource *fb_resource; /*optional*/
- /* number of clients in the list that follows */
- int num_clients;
- /* array of client information of clients */
- struct {
- unsigned product_id; /* mfr id in top 16 bits, product id
- * in lower 16 bits
- */
- char *name; /* the device name will be the platform
- * device name registered for the client,
- * it should match the name of the associated
- * driver
- */
- unsigned id; /* id for mddi client device node, will also
- * be used as device id of panel devices, if
- * the client device will have multiple panels
- * space must be left here for them
- */
- void *client_data; /* required private client data */
- unsigned int clk_rate; /* optional: if the client requires a
- * different mddi clk rate
- */
- } client_platform_data[];
-};
-
-struct mdp_blit_req;
-struct fb_info;
-struct mdp_device {
- struct device dev;
- void (*dma)(struct mdp_device *mpd, uint32_t addr,
- uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
- uint32_t y, struct msmfb_callback *callback, int interface);
- void (*dma_wait)(struct mdp_device *mdp);
- int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
- struct mdp_blit_req *req);
- void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
-};
-
-struct class_interface;
-int register_mdp_client(struct class_interface *class_intf);
-
-/**** private client data structs go below this line ***/
-
-struct msm_mddi_bridge_platform_data {
- /* from board file */
- int (*init)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*uninit)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- /* passed to panel for use by the fb driver */
- int (*blank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*unblank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- struct msm_fb_data fb_data;
-};
-
-
-
-#endif
diff --git a/kernel/include/linux/platform_data/wkup_m3.h b/kernel/include/linux/platform_data/wkup_m3.h
new file mode 100644
index 000000000..3f1d77eff
--- /dev/null
+++ b/kernel/include/linux/platform_data/wkup_m3.h
@@ -0,0 +1,30 @@
+/*
+ * TI Wakeup M3 remote processor platform data
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H
+#define _LINUX_PLATFORM_DATA_WKUP_M3_H
+
+struct platform_device;
+
+struct wkup_m3_platform_data {
+ const char *reset_name;
+
+ int (*assert_reset)(struct platform_device *pdev, const char *name);
+ int (*deassert_reset)(struct platform_device *pdev, const char *name);
+};
+
+#endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */
diff --git a/kernel/include/linux/platform_data/zforce_ts.h b/kernel/include/linux/platform_data/zforce_ts.h
index 0472ab2f6..7bdece8ef 100644
--- a/kernel/include/linux/platform_data/zforce_ts.h
+++ b/kernel/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
#define _LINUX_INPUT_ZFORCE_TS_H
struct zforce_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
unsigned int x_max;
unsigned int y_max;
};
diff --git a/kernel/include/linux/platform_device.h b/kernel/include/linux/platform_device.h
index 58f1e75ba..dc777be5f 100644
--- a/kernel/include/linux/platform_device.h
+++ b/kernel/include/linux/platform_device.h
@@ -222,6 +222,15 @@ static inline void platform_set_drvdata(struct platform_device *pdev,
module_driver(__platform_driver, platform_driver_register, \
platform_driver_unregister)
+/* builtin_platform_driver() - Helper macro for builtin drivers that
+ * don't do anything special in driver init. This eliminates some
+ * boilerplate. Each driver may only use this macro once, and
+ * calling it replaces device_initcall(). Note this is meant to be
+ * a parallel of module_platform_driver() above, but without the __exit parts.
+ */
+#define builtin_platform_driver(__platform_driver) \
+ builtin_driver(__platform_driver, platform_driver_register)
+
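To show what the macro saves, here is a sketch of a builtin driver whose entire init boilerplate collapses into one line; the driver name and probe body are hypothetical.

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* hypothetical: nothing to set up */
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name	= "demo-device",	/* hypothetical */
	},
};

/* Expands to a device_initcall() that registers demo_driver */
builtin_platform_driver(demo_driver);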
/* module_platform_driver_probe() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
* boilerplate. Each module may only use this macro once, and
@@ -240,6 +249,20 @@ static void __exit __platform_driver##_exit(void) \
} \
module_exit(__platform_driver##_exit);
+/* builtin_platform_driver_probe() - Helper macro for drivers that don't do
+ * anything special in device init. This eliminates some boilerplate. Each
+ * driver may only use this macro once, and using it replaces device_initcall.
+ * This is meant to be a parallel of module_platform_driver_probe above, but
+ * without the __exit parts.
+ */
+#define builtin_platform_driver_probe(__platform_driver, __platform_probe) \
+static int __init __platform_driver##_init(void) \
+{ \
+ return platform_driver_probe(&(__platform_driver), \
+ __platform_probe); \
+} \
+device_initcall(__platform_driver##_init);
+
#define platform_create_bundle(driver, probe, res, n_res, data, size) \
__platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
extern struct platform_device *__platform_create_bundle(
@@ -247,6 +270,14 @@ extern struct platform_device *__platform_create_bundle(
struct resource *res, unsigned int n_res,
const void *data, size_t size, struct module *module);
+int __platform_register_drivers(struct platform_driver * const *drivers,
+ unsigned int count, struct module *owner);
+void platform_unregister_drivers(struct platform_driver * const *drivers,
+ unsigned int count);
+
+#define platform_register_drivers(drivers, count) \
+ __platform_register_drivers(drivers, count, THIS_MODULE)
+
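A sketch of the intended use, assuming (as the paired unregister helper suggests) that the core unwinds already-registered drivers when a later registration fails; the driver objects named demo_* are hypothetical.

extern struct platform_driver demo_core_driver;	/* hypothetical */
extern struct platform_driver demo_aux_driver;	/* hypothetical */

static struct platform_driver * const demo_drivers[] = {
	&demo_core_driver,
	&demo_aux_driver,
};

static int __init demo_init(void)
{
	/* registers all entries, with THIS_MODULE as the owner */
	return platform_register_drivers(demo_drivers,
					 ARRAY_SIZE(demo_drivers));
}
module_init(demo_init);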
/* early platform driver interface */
struct early_platform_driver {
const char *class_str;
diff --git a/kernel/include/linux/pm.h b/kernel/include/linux/pm.h
index 2d29c64f8..528be6787 100644
--- a/kernel/include/linux/pm.h
+++ b/kernel/include/linux/pm.h
@@ -342,6 +342,18 @@ struct dev_pm_ops {
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
+#ifdef CONFIG_PM_SLEEP
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = suspend_fn, \
+ .resume_noirq = resume_fn, \
+ .freeze_noirq = suspend_fn, \
+ .thaw_noirq = resume_fn, \
+ .poweroff_noirq = suspend_fn, \
+ .restore_noirq = resume_fn,
+#else
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
+#endif
+
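A sketch of how a driver would use the new macro so that one suspend/resume pair covers all six noirq system-sleep callbacks; the callback bodies are hypothetical.

static int demo_suspend_noirq(struct device *dev)
{
	return 0;	/* hypothetical: quiesce with IRQs disabled */
}

static int demo_resume_noirq(struct device *dev)
{
	return 0;	/* hypothetical: restore with IRQs disabled */
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend_noirq, demo_resume_noirq)
};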
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
@@ -529,6 +541,7 @@ enum rpm_request {
};
struct wakeup_source;
+struct wake_irq;
struct pm_domain_data;
struct pm_subsys_data {
@@ -568,6 +581,7 @@ struct dev_pm_info {
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
+ struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
@@ -718,6 +732,7 @@ extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
+extern void pm_complete_with_resume_check(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
diff --git a/kernel/include/linux/pm_clock.h b/kernel/include/linux/pm_clock.h
index 0b0039634..25266c600 100644
--- a/kernel/include/linux/pm_clock.h
+++ b/kernel/include/linux/pm_clock.h
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
struct clk;
+#ifdef CONFIG_PM
+extern int pm_clk_runtime_suspend(struct device *dev);
+extern int pm_clk_runtime_resume(struct device *dev);
+#define USE_PM_CLK_RUNTIME_OPS \
+ .runtime_suspend = pm_clk_runtime_suspend, \
+ .runtime_resume = pm_clk_runtime_resume,
+#else
+#define USE_PM_CLK_RUNTIME_OPS
+#endif
+
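For illustration, a dev_pm_ops whose runtime PM is handled entirely by the pm_clk layer; a minimal sketch.

static const struct dev_pm_ops demo_clk_pm_ops = {
	USE_PM_CLK_RUNTIME_OPS
	/* system sleep callbacks, if any, would follow here */
};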
#ifdef CONFIG_PM_CLK
static inline bool pm_clk_no_clocks(struct device *dev)
{
diff --git a/kernel/include/linux/pm_domain.h b/kernel/include/linux/pm_domain.h
index 681ccb053..ba4ced38e 100644
--- a/kernel/include/linux/pm_domain.h
+++ b/kernel/include/linux/pm_domain.h
@@ -15,16 +15,12 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
-#include <linux/cpuidle.h>
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
- GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
- GPD_STATE_BUSY, /* Something is happening to the PM domain */
- GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */
};
@@ -41,11 +37,6 @@ struct gpd_dev_ops {
bool (*active_wakeup)(struct device *dev);
};
-struct gpd_cpuidle_data {
- unsigned int saved_exit_latency;
- struct cpuidle_state *idle_state;
-};
-
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
@@ -56,12 +47,8 @@ struct generic_pm_domain {
struct dev_power_governor *gov;
struct work_struct power_off_work;
const char *name;
- unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
- wait_queue_head_t status_wait_queue;
- struct task_struct *poweroff_task; /* Powering off task */
- unsigned int resume_count; /* Number of devices being resumed */
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -74,7 +61,6 @@ struct generic_pm_domain {
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
bool cached_power_down_ok;
- struct gpd_cpuidle_data *cpuidle_data;
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
@@ -95,10 +81,8 @@ struct gpd_link {
};
struct gpd_timing_data {
- s64 stop_latency_ns;
- s64 start_latency_ns;
- s64 save_state_latency_ns;
- s64 restore_state_latency_ns;
+ s64 suspend_latency_ns;
+ s64 resume_latency_ns;
s64 effective_constraint_ns;
bool constraint_changed;
bool cached_stop_ok;
@@ -113,7 +97,6 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
- int need_restore;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -132,29 +115,15 @@ extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
-extern int __pm_genpd_name_add_device(const char *domain_name,
- struct device *dev,
- struct gpd_timing_data *td);
-
extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev);
extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain);
-extern int pm_genpd_add_subdomain_names(const char *master_name,
- const char *subdomain_name);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
-extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
-extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
-extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
-extern int pm_genpd_name_detach_cpuidle(const char *name);
extern void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
-extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
-extern int pm_genpd_name_poweron(const char *domain_name);
-extern void pm_genpd_poweroff_unused(void);
-
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
#else
@@ -173,12 +142,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline int __pm_genpd_name_add_device(const char *domain_name,
- struct device *dev,
- struct gpd_timing_data *td)
-{
- return -ENOSYS;
-}
static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev)
{
@@ -189,47 +152,15 @@ static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline int pm_genpd_add_subdomain_names(const char *master_name,
- const char *subdomain_name)
-{
- return -ENOSYS;
-}
static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target)
{
return -ENOSYS;
}
-static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
-{
- return -ENOSYS;
-}
-static inline int pm_genpd_name_attach_cpuidle(const char *name, int state)
-{
- return -ENOSYS;
-}
-static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
-{
- return -ENOSYS;
-}
-static inline int pm_genpd_name_detach_cpuidle(const char *name)
-{
- return -ENOSYS;
-}
static inline void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off)
{
}
-static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
-{
- return -ENOSYS;
-}
-static inline int pm_genpd_name_poweron(const char *domain_name)
-{
- return -ENOSYS;
-}
-static inline void pm_genpd_poweroff_unused(void) {}
-#define simple_qos_governor NULL
-#define pm_domain_always_on_gov NULL
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
@@ -238,12 +169,6 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
return __pm_genpd_add_device(genpd, dev, NULL);
}
-static inline int pm_genpd_name_add_device(const char *domain_name,
- struct device *dev)
-{
- return __pm_genpd_name_add_device(domain_name, dev, NULL);
-}
-
#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
extern void pm_genpd_syscore_poweroff(struct device *dev);
extern void pm_genpd_syscore_poweron(struct device *dev);
diff --git a/kernel/include/linux/pm_opp.h b/kernel/include/linux/pm_opp.h
index cec2d4540..9a2e50337 100644
--- a/kernel/include/linux/pm_opp.h
+++ b/kernel/include/linux/pm_opp.h
@@ -30,7 +30,11 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
+
int dev_pm_opp_get_opp_count(struct device *dev);
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -62,11 +66,26 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
+static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+ return false;
+}
+
static inline int dev_pm_opp_get_opp_count(struct device *dev)
{
return 0;
}
+static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ return NULL;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
@@ -113,16 +132,39 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-void of_free_opp_table(struct device *dev);
+int dev_pm_opp_of_add_table(struct device *dev);
+void dev_pm_opp_of_remove_table(struct device *dev);
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
#else
-static inline int of_init_opp_table(struct device *dev)
+static inline int dev_pm_opp_of_add_table(struct device *dev)
{
return -EINVAL;
}
-static inline void of_free_opp_table(struct device *dev)
+static inline void dev_pm_opp_of_remove_table(struct device *dev)
+{
+}
+
+static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+}
+
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
+ return -ENOSYS;
}
#endif
diff --git a/kernel/include/linux/pm_qos.h b/kernel/include/linux/pm_qos.h
index 7b3ae0cff..0f65d36c2 100644
--- a/kernel/include/linux/pm_qos.h
+++ b/kernel/include/linux/pm_qos.h
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+int dev_pm_qos_expose_latency_tolerance(struct device *dev);
+void dev_pm_qos_hide_latency_tolerance(struct device *dev);
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{ return 0; }
+static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+ { return 0; }
+static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
diff --git a/kernel/include/linux/pm_runtime.h b/kernel/include/linux/pm_runtime.h
index 30e84d48b..3bdbb4189 100644
--- a/kernel/include/linux/pm_runtime.h
+++ b/kernel/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
return dev->power.runtime_status == RPM_SUSPENDED;
}
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
-{
- return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
-}
-
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/kernel/include/linux/pm_wakeirq.h b/kernel/include/linux/pm_wakeirq.h
new file mode 100644
index 000000000..cd5b62db9
--- /dev/null
+++ b/kernel/include/linux/pm_wakeirq.h
@@ -0,0 +1,51 @@
+/*
+ * pm_wakeirq.h - Device wakeirq helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PM_WAKEIRQ_H
+#define _LINUX_PM_WAKEIRQ_H
+
+#ifdef CONFIG_PM
+
+extern int dev_pm_set_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
+ int irq);
+extern void dev_pm_clear_wake_irq(struct device *dev);
+extern void dev_pm_enable_wake_irq(struct device *dev);
+extern void dev_pm_disable_wake_irq(struct device *dev);
+
+#else /* !CONFIG_PM */
+
+static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline void dev_pm_clear_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_enable_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_disable_wake_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM */
+#endif /* _LINUX_PM_WAKEIRQ_H */
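A probe-time sketch of the dedicated wakeirq helpers, assuming the device publishes an optional second interrupt for wakeup; everything named demo_* is hypothetical.

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 1);	/* optional wake IRQ */
	int error;

	device_init_wakeup(&pdev->dev, true);	/* mark wakeup-capable */
	if (irq > 0) {
		error = dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
		if (error)
			return error;
	}
	return 0;	/* dev_pm_clear_wake_irq() undoes this on remove */
}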
diff --git a/kernel/include/linux/pm_wakeup.h b/kernel/include/linux/pm_wakeup.h
index a0f70808d..a3447932d 100644
--- a/kernel/include/linux/pm_wakeup.h
+++ b/kernel/include/linux/pm_wakeup.h
@@ -28,9 +28,17 @@
#include <linux/types.h>
+struct wake_irq;
+
/**
* struct wakeup_source - Representation of wakeup sources
*
+ * @name: Name of the wakeup source
+ * @entry: Wakeup source list entry
+ * @lock: Wakeup source lock
+ * @wakeirq: Optional device specific wakeirq
+ * @timer: Wakeup timer list
+ * @timer_expires: Wakeup timer expiration
* @total_time: Total time this wakeup source has been active.
* @max_time: Maximum time this wakeup source has been continuously active.
 * @last_time: Monotonic clock when the wakeup source was last touched.
@@ -47,6 +55,7 @@ struct wakeup_source {
const char *name;
struct list_head entry;
spinlock_t lock;
+ struct wake_irq *wakeirq;
struct timer_list timer;
unsigned long timer_expires;
ktime_t total_time;
diff --git a/kernel/include/linux/pmem.h b/kernel/include/linux/pmem.h
new file mode 100644
index 000000000..acfea8ce4
--- /dev/null
+++ b/kernel/include/linux/pmem.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __PMEM_H__
+#define __PMEM_H__
+
+#include <linux/io.h>
+#include <linux/uio.h>
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+#include <asm/pmem.h>
+#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation, all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
+static inline bool __arch_has_wmb_pmem(void)
+{
+ return false;
+}
+
+static inline void arch_wmb_pmem(void)
+{
+ BUG();
+}
+
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+ size_t n)
+{
+ BUG();
+}
+
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ BUG();
+ return 0;
+}
+
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+ BUG();
+}
+#endif
+
+/*
+ * Architectures that define ARCH_HAS_PMEM_API must provide
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
+ */
+static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
+{
+ memcpy(dst, (void __force const *) src, size);
+}
+
+static inline bool arch_has_pmem_api(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+}
+
+/**
+ * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
+ *
+ * For a given cpu implementation within an architecture it is possible
+ * that wmb_pmem() resolves to a nop. When this returns false, pmem
+ * API users are unable to ensure durability and may want to
+ * fall back to a different data consistency model, or otherwise notify
+ * the user.
+ */
+static inline bool arch_has_wmb_pmem(void)
+{
+ return arch_has_pmem_api() && __arch_has_wmb_pmem();
+}
+
+/*
+ * These defaults seek to offer decent performance and minimize the
+ * window between i/o completion and writes being durable on media.
+ * However, it is undefined / architecture specific whether
+ * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for
+ * making data durable relative to i/o completion.
+ */
+static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+ size_t size)
+{
+ memcpy((void __force *) dst, src, size);
+}
+
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
+{
+ if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+ clear_page((void __force *)addr);
+ else
+ memset((void __force *)addr, 0, size);
+}
+
+/**
+ * memcpy_to_pmem - copy data to persistent memory
+ * @dst: destination buffer for the copy
+ * @src: source buffer for the copy
+ * @n: length of the copy in bytes
+ *
+ * Perform a memory copy that results in the destination of the copy
+ * being effectively evicted from, or never written to, the processor
+ * cache hierarchy after the copy completes. After memcpy_to_pmem()
+ * data may still reside in cpu or platform buffers, so this operation
+ * must be followed by a wmb_pmem().
+ */
+static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
+{
+ if (arch_has_pmem_api())
+ arch_memcpy_to_pmem(dst, src, n);
+ else
+ default_memcpy_to_pmem(dst, src, n);
+}
+
+/**
+ * wmb_pmem - synchronize writes to persistent memory
+ *
+ * After a series of memcpy_to_pmem() operations this drains data from
+ * cpu write buffers and any platform (memory controller) buffers to
+ * ensure that written data is durable on persistent memory media.
+ */
+static inline void wmb_pmem(void)
+{
+ if (arch_has_wmb_pmem())
+ arch_wmb_pmem();
+ else
+ wmb();
+}
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+ * @bytes: number of bytes to copy
+ * @i: iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ if (arch_has_pmem_api())
+ return arch_copy_from_iter_pmem(addr, bytes, i);
+ return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr: virtual start address
+ * @size: number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+ if (arch_has_pmem_api())
+ arch_clear_pmem(addr, size);
+ else
+ default_clear_pmem(addr, size);
+}
+#endif /* __PMEM_H__ */
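The kernel-doc above mandates a specific ordering; a minimal sketch of a durable write under those rules:

static void demo_commit(void __pmem *dst, const void *src, size_t n)
{
	memcpy_to_pmem(dst, src, n);	/* data may still sit in CPU buffers */
	wmb_pmem();			/* drain buffers: data now durable */
}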
diff --git a/kernel/include/linux/poison.h b/kernel/include/linux/poison.h
index 2110a81c5..317e16de0 100644
--- a/kernel/include/linux/poison.h
+++ b/kernel/include/linux/poison.h
@@ -19,8 +19,8 @@
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
@@ -69,10 +69,6 @@
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
-/********** net/ **********/
-#define NEIGHBOR_DEAD 0xdeadbeef
-#define NETFILTER_LINK_POISON 0xdead57ac
-
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
@@ -83,7 +79,4 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
-/********** sound/oss/ **********/
-#define OSS_POISON_FREE 0xAB
-
#endif
diff --git a/kernel/include/linux/power/bq27x00_battery.h b/kernel/include/linux/power/bq27x00_battery.h
deleted file mode 100644
index a857f719b..000000000
--- a/kernel/include/linux/power/bq27x00_battery.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __LINUX_BQ27X00_BATTERY_H__
-#define __LINUX_BQ27X00_BATTERY_H__
-
-/**
- * struct bq27000_plaform_data - Platform data for bq27000 devices
- * @name: Name of the battery. If NULL the driver will fallback to "bq27000".
- * @read: HDQ read callback.
- * This function should provide access to the HDQ bus the battery is
- * connected to.
- * The first parameter is a pointer to the battery device, the second the
- * register to be read. The return value should either be the content of
- * the passed register or an error value.
- */
-struct bq27000_platform_data {
- const char *name;
- int (*read)(struct device *dev, unsigned int);
-};
-
-#endif
diff --git a/kernel/include/linux/power/bq27xxx_battery.h b/kernel/include/linux/power/bq27xxx_battery.h
new file mode 100644
index 000000000..45f6a7b5b
--- /dev/null
+++ b/kernel/include/linux/power/bq27xxx_battery.h
@@ -0,0 +1,31 @@
+#ifndef __LINUX_BQ27X00_BATTERY_H__
+#define __LINUX_BQ27X00_BATTERY_H__
+
+/**
+ * struct bq27xxx_platform_data - Platform data for bq27xxx devices
+ * @name: Name of the battery.
+ * @chip: Chip class number of this device.
+ * @read: HDQ read callback.
+ * This function should provide access to the HDQ bus the battery is
+ * connected to.
+ * The first parameter is a pointer to the battery device, the second the
+ * register to be read. The return value should either be the content of
+ * the passed register or an error value.
+ */
+enum bq27xxx_chip {
+ BQ27000 = 1, /* bq27000, bq27200 */
+ BQ27010, /* bq27010, bq27210 */
+ BQ27500, /* bq27500, bq27510, bq27520 */
+ BQ27530, /* bq27530, bq27531 */
+ BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
+ BQ27545, /* bq27545 */
+ BQ27421, /* bq27421, bq27425, bq27441, bq27621 */
+};
+
+struct bq27xxx_platform_data {
+ const char *name;
+ enum bq27xxx_chip chip;
+ int (*read)(struct device *dev, unsigned int);
+};
+
+#endif
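A board-file sketch; the battery name and the trivial read callback are hypothetical stand-ins for a real HDQ bus accessor.

static int demo_hdq_read(struct device *dev, unsigned int reg)
{
	return 0;	/* hypothetical: return register value or -errno */
}

static struct bq27xxx_platform_data demo_fuel_gauge_pdata = {
	.name	= "main-battery",
	.chip	= BQ27000,
	.read	= demo_hdq_read,
};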
diff --git a/kernel/include/linux/power/charger-manager.h b/kernel/include/linux/power/charger-manager.h
index eadf28cb2..c4fa907c8 100644
--- a/kernel/include/linux/power/charger-manager.h
+++ b/kernel/include/linux/power/charger-manager.h
@@ -65,7 +65,7 @@ struct charger_cable {
const char *extcon_name;
const char *name;
- /* The charger-manager use Exton framework*/
+ /* The charger-manager use Extcon framework */
struct extcon_specific_cable_nb extcon_dev;
struct work_struct wq;
struct notifier_block nb;
@@ -94,7 +94,7 @@ struct charger_cable {
* the charger will be maintained with disabled state.
* @cables:
* the array of charger cables to enable/disable charger
- * and set current limit according to constratint data of
+ * and set current limit according to constraint data of
* struct charger_cable if only charger cable included
* in the array of charger cables is attached/detached.
* @num_cables: the number of charger cables.
@@ -148,7 +148,7 @@ struct charger_regulator {
* @polling_interval_ms: interval in millisecond at which
* charger manager will monitor battery health
* @battery_present:
- * Specify where information for existance of battery can be obtained
+ * Specify where information for existence of battery can be obtained
* @psy_charger_stat: the names of power-supply for chargers
* @num_charger_regulator: the number of entries in charger_regulators
* @charger_regulators: array of charger regulators
@@ -156,7 +156,7 @@ struct charger_regulator {
* @thermal_zone : the name of thermal zone for battery
* @temp_min : Minimum battery temperature for charging.
* @temp_max : Maximum battery temperature for charging.
- * @temp_diff : Temperature diffential to restart charging.
+ * @temp_diff : Temperature difference to restart charging.
* @measure_battery_temp:
* true: measure battery temperature
* false: measure ambient temperature
diff --git a/kernel/include/linux/power/max17042_battery.h b/kernel/include/linux/power/max17042_battery.h
index cf112b407..522757ac9 100644
--- a/kernel/include/linux/power/max17042_battery.h
+++ b/kernel/include/linux/power/max17042_battery.h
@@ -215,6 +215,10 @@ struct max17042_platform_data {
* the datasheet although it can be changed by board designers.
*/
unsigned int r_sns;
+ int vmin; /* in millivolts */
+ int vmax; /* in millivolts */
+ int temp_min; /* in tenths of degree Celsius */
+ int temp_max; /* in tenths of degree Celsius */
};
#endif /* __MAX17042_BATTERY_H_ */
diff --git a/kernel/include/linux/power_supply.h b/kernel/include/linux/power_supply.h
index a80f1fd01..ef9f15921 100644
--- a/kernel/include/linux/power_supply.h
+++ b/kernel/include/linux/power_supply.h
@@ -206,6 +206,11 @@ struct power_supply_desc {
int (*set_property)(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+ /*
+ * property_is_writeable() will be called during registration
+ * of the power supply. If this happens during device probe, it must
+ * not access the device's internal data (because probe has not finished).
+ */
int (*property_is_writeable)(struct power_supply *psy,
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
@@ -287,10 +292,15 @@ extern void power_supply_put(struct power_supply *psy);
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
+extern struct power_supply *devm_power_supply_get_by_phandle(
+ struct device *dev, const char *property);
#else /* !CONFIG_OF */
static inline struct power_supply *
power_supply_get_by_phandle(struct device_node *np, const char *property)
{ return NULL; }
+static inline struct power_supply *
+devm_power_supply_get_by_phandle(struct device *dev, const char *property)
+{ return NULL; }
#endif /* CONFIG_OF */
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
diff --git a/kernel/include/linux/pps_kernel.h b/kernel/include/linux/pps_kernel.h
index 1d2cd2124..54bf1484d 100644
--- a/kernel/include/linux/pps_kernel.h
+++ b/kernel/include/linux/pps_kernel.h
@@ -48,9 +48,9 @@ struct pps_source_info {
struct pps_event_time {
#ifdef CONFIG_NTP_PPS
- struct timespec ts_raw;
+ struct timespec64 ts_raw;
#endif /* CONFIG_NTP_PPS */
- struct timespec ts_real;
+ struct timespec64 ts_real;
};
/* The main struct */
@@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps,
struct pps_device *pps_lookup_dev(void const *cookie);
static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
- struct timespec ts)
+ struct timespec64 ts)
{
kt->sec = ts.tv_sec;
kt->nsec = ts.tv_nsec;
@@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
static inline void pps_get_ts(struct pps_event_time *ts)
{
- getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+ ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
}
#else /* CONFIG_NTP_PPS */
static inline void pps_get_ts(struct pps_event_time *ts)
{
- getnstimeofday(&ts->ts_real);
+ ktime_get_real_ts64(&ts->ts_real);
}
#endif /* CONFIG_NTP_PPS */
/* Subtract known time delay from PPS event time(s) */
-static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
{
- ts->ts_real = timespec_sub(ts->ts_real, delta);
+ ts->ts_real = timespec64_sub(ts->ts_real, delta);
#ifdef CONFIG_NTP_PPS
- ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+ ts->ts_raw = timespec64_sub(ts->ts_raw, delta);
#endif
}
diff --git a/kernel/include/linux/pr.h b/kernel/include/linux/pr.h
new file mode 100644
index 000000000..65c01c10b
--- /dev/null
+++ b/kernel/include/linux/pr.h
@@ -0,0 +1,18 @@
+#ifndef LINUX_PR_H
+#define LINUX_PR_H
+
+#include <uapi/linux/pr.h>
+
+struct pr_ops {
+ int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
+ u32 flags);
+ int (*pr_reserve)(struct block_device *bdev, u64 key,
+ enum pr_type type, u32 flags);
+ int (*pr_release)(struct block_device *bdev, u64 key,
+ enum pr_type type);
+ int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
+ enum pr_type type, bool abort);
+ int (*pr_clear)(struct block_device *bdev, u64 key);
+};
+
+#endif /* LINUX_PR_H */
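A hedged sketch of a block driver exposing a pr_ops table; the single stubbed callback is hypothetical, and leaving the remaining ops NULL is an assumption of this sketch, not something the header guarantees.

static int demo_pr_clear(struct block_device *bdev, u64 key)
{
	return -EOPNOTSUPP;	/* hypothetical: clearing not supported */
}

static const struct pr_ops demo_pr_ops = {
	.pr_clear	= demo_pr_clear,
	/* assumption: remaining callbacks left NULL in this sketch */
};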
diff --git a/kernel/include/linux/preempt.h b/kernel/include/linux/preempt.h
index 66587bf45..1cfb1cb72 100644
--- a/kernel/include/linux/preempt.h
+++ b/kernel/include/linux/preempt.h
@@ -10,17 +10,137 @@
#include <linux/list.h>
/*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. However,
+ * a few palaeontologic drivers reenable interrupts in the handler, so
+ * we need more than one bit here.
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x000f0000
+ * NMI_MASK: 0x00100000
+ * PREEMPT_NEED_RESCHED: 0x80000000
*/
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 1
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif
+
+/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() (0UL)
+extern int in_serving_softirq(void);
+#endif
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
+/*
+ * The preempt_count offset after preempt_disable();
+ */
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
+#else
+# define PREEMPT_DISABLE_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset after spin_lock()
+ */
+#if !defined(CONFIG_PREEMPT_RT_FULL)
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() (preempt_count() != 0)
+
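The classic use of these context macros is choosing an allocation mode; a minimal sketch (which, per the in_atomic() warning above, still cannot catch every atomic context, e.g. spinlocked regions on non-preemptible kernels):

static void *demo_alloc(size_t size)
{
	/* GFP_ATOMIC in hardirq/softirq/NMI context, GFP_KERNEL otherwise */
	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(size, gfp);
}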
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
+
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+ ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -75,6 +195,8 @@ do { \
# define preempt_check_resched_rt() barrier();
#endif
+#define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
@@ -83,9 +205,16 @@ do { \
__preempt_schedule(); \
} while (0)
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ if (unlikely(__preempt_count_dec_and_test())) \
+ __preempt_schedule_notrace(); \
+} while (0)
+
#define preempt_check_resched() \
do { \
- if (should_resched()) \
+ if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
@@ -96,46 +225,33 @@ do { \
preempt_check_resched(); \
} while (0)
-#else
+#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
-#define preempt_check_resched() do { } while (0)
-#endif
-
-#define preempt_disable_notrace() \
-do { \
- __preempt_count_inc(); \
- barrier(); \
-} while (0)
-#define preempt_enable_no_resched_notrace() \
+#define preempt_enable_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
-#ifdef CONFIG_PREEMPT
-
-#ifndef CONFIG_CONTEXT_TRACKING
-#define __preempt_schedule_context() __preempt_schedule()
-#endif
+#define preempt_check_resched() do { } while (0)
+#endif /* CONFIG_PREEMPT */
-#define preempt_enable_notrace() \
+#define preempt_disable_notrace() \
do { \
+ __preempt_count_inc(); \
barrier(); \
- if (unlikely(__preempt_count_dec_and_test())) \
- __preempt_schedule_context(); \
} while (0)
-#else
-#define preempt_enable_notrace() \
+
+#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
-#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -155,6 +271,7 @@ do { \
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#define preempt_check_resched_rt() barrier()
+#define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
@@ -235,6 +352,8 @@ struct preempt_notifier {
struct preempt_ops *ops;
};
+void preempt_notifier_inc(void);
+void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
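
A sketch of how the register/unregister pair and the new inc/dec hooks fit
together, following the existing struct preempt_ops callbacks
(sched_in/sched_out); the "demo" names are hypothetical and the callback
bodies are stubs:

#include <linux/preempt.h>
#include <linux/sched.h>

static void demo_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current task is about to be scheduled in on @cpu */
}

static void demo_sched_out(struct preempt_notifier *pn,
			   struct task_struct *next)
{
	/* current task is being preempted in favour of @next */
}

static struct preempt_ops demo_ops = {
	.sched_in	= demo_sched_in,
	.sched_out	= demo_sched_out,
};

static struct preempt_notifier demo_notifier;

static void demo_register(void)
{
	/* inc() enables the notifier machinery before first registration */
	preempt_notifier_inc();
	preempt_notifier_init(&demo_notifier, &demo_ops);
	preempt_notifier_register(&demo_notifier);
}
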
diff --git a/kernel/include/linux/preempt_mask.h b/kernel/include/linux/preempt_mask.h
deleted file mode 100644
index c7e373dc7..000000000
--- a/kernel/include/linux/preempt_mask.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef LINUX_PREEMPT_MASK_H
-#define LINUX_PREEMPT_MASK_H
-
-#include <linux/preempt.h>
-
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count could in theory be the same as the number of
- * interrupts in the system, but we run all interrupt handlers with
- * interrupts disabled, so we cannot have nesting interrupts. Though
- * there are a few palaeontologic drivers which reenable interrupts in
- * the handler, so we need more than one bit here.
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
- * PREEMPT_ACTIVE: 0x00200000
- */
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-#define HARDIRQ_BITS 4
-#define NMI_BITS 1
-
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
-
-#define __IRQ_MASK(x) ((1UL << (x))-1)
-
-#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-#define NMI_OFFSET (1UL << NMI_SHIFT)
-
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
-
-#define PREEMPT_ACTIVE_BITS 1
-#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-
-#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#else
-# define softirq_count() (0UL)
-extern int in_serving_softirq(void);
-#endif
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
- */
-#define in_irq() (hardirq_count())
-#define in_softirq() (softirq_count())
-#define in_interrupt() (irq_count())
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
-
-#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_CHECK_OFFSET 0
-#endif
-
-/*
- * The preempt_count offset needed for things like:
- *
- * spin_lock_bh()
- *
- * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
- * softirqs, such that unlock sequences of:
- *
- * spin_unlock();
- * local_bh_enable();
- *
- * Work as expected.
- */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
-
-/*
- * Are we running in atomic context? WARNING: this macro cannot
- * always detect atomic context; in particular, it cannot know about
- * held spinlocks in non-preemptible kernels. Thus it should not be
- * used in the general case to determine whether sleeping is possible.
- * Do not use in_atomic() in driver code.
- */
-#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-
-/*
- * Check whether we were atomic before we did preempt_disable():
- * (used by the scheduler, *after* releasing the kernel lock)
- */
-#define in_atomic_preempt_off() \
- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible() 0
-#endif
-
-#endif /* LINUX_PREEMPT_MASK_H */
diff --git a/kernel/include/linux/printk.h b/kernel/include/linux/printk.h
index 08d0a7574..9cdca696b 100644
--- a/kernel/include/linux/printk.h
+++ b/kernel/include/linux/printk.h
@@ -30,6 +30,8 @@ static inline const char *printk_skip_level(const char *buffer)
return buffer;
}
+#define CONSOLE_EXT_LOG_MAX 8192
+
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
@@ -122,7 +124,7 @@ void early_printk(const char *s, ...) { }
static inline void printk_kill(void) { }
#endif
-typedef int(*printk_func_t)(const char *fmt, va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
@@ -166,7 +168,7 @@ char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
void log_buf_kexec_setup(void);
void __init setup_log_buf(int early);
-void dump_stack_set_arch_desc(const char *fmt, ...);
+__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
#else
@@ -217,7 +219,7 @@ static inline void setup_log_buf(int early)
{
}
-static inline void dump_stack_set_arch_desc(const char *fmt, ...)
+static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
{
}
@@ -404,10 +406,10 @@ do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
- __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
@@ -456,11 +458,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#else
+#elif defined(DEBUG)
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+#else
+static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+}
+#endif
#endif
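
With the inline stub added above, callers no longer need their own
ifdeffery around print_hex_dump_debug(); a sketch, assuming a
caller-supplied buffer:

#include <linux/printk.h>

static void dump_frame(const void *buf, size_t len)
{
	/* compiles away unless CONFIG_DYNAMIC_DEBUG or DEBUG is set */
	print_hex_dump_debug("frame: ", DUMP_PREFIX_OFFSET,
			     16, 1, buf, len, true);
}
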
diff --git a/kernel/include/linux/property.h b/kernel/include/linux/property.h
index de8bdf417..0a3705a7c 100644
--- a/kernel/include/linux/property.h
+++ b/kernel/include/linux/property.h
@@ -27,6 +27,12 @@ enum dev_prop_type {
DEV_PROP_MAX,
};
+enum dev_dma_attr {
+ DEV_DMA_NOT_SUPPORTED,
+ DEV_DMA_NON_COHERENT,
+ DEV_DMA_COHERENT,
+};
+
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval);
@@ -40,6 +46,8 @@ int device_property_read_string_array(struct device *dev, const char *propname,
const char **val, size_t nval);
int device_property_read_string(struct device *dev, const char *propname,
const char **val);
+int device_property_match_string(struct device *dev,
+ const char *propname, const char *string);
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
@@ -59,6 +67,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
size_t nval);
int fwnode_property_read_string(struct fwnode_handle *fwnode,
const char *propname, const char **val);
+int fwnode_property_match_string(struct fwnode_handle *fwnode,
+ const char *propname, const char *string);
struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child);
@@ -164,4 +174,12 @@ struct property_set {
void device_add_property_set(struct device *dev, struct property_set *pset);
+bool device_dma_supported(struct device *dev);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev);
+
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
#endif /* _LINUX_PROPERTY_H_ */
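
A sketch exercising the new helpers together; the "mode"/"fast" property
values and the demo function are hypothetical:

#include <linux/etherdevice.h>
#include <linux/property.h>

static int demo_probe_props(struct device *dev)
{
	char mac[ETH_ALEN];
	int idx;

	/* index of "fast" in the "mode" string array, or -errno */
	idx = device_property_match_string(dev, "mode", "fast");
	if (idx < 0)
		return idx;

	/* returns mac on success, NULL if no usable address was found */
	if (!device_get_mac_address(dev, mac, sizeof(mac)))
		return -ENODEV;

	return device_get_phy_mode(dev);	/* PHY_INTERFACE_MODE_* or -errno */
}
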
diff --git a/kernel/include/linux/proportions.h b/kernel/include/linux/proportions.h
index 00e8e8fa7..21221338a 100644
--- a/kernel/include/linux/proportions.h
+++ b/kernel/include/linux/proportions.h
@@ -1,7 +1,7 @@
/*
* FLoating proportions
*
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* This file contains the public data structure and API definitions.
*/
@@ -33,7 +33,7 @@ struct prop_global {
/*
* global proportion descriptor
*
- * this is needed to consitently flip prop_global structures.
+ * this is needed to consistently flip prop_global structures.
*/
struct prop_descriptor {
int index;
diff --git a/kernel/include/linux/psci.h b/kernel/include/linux/psci.h
new file mode 100644
index 000000000..12c486545
--- /dev/null
+++ b/kernel/include/linux/psci.h
@@ -0,0 +1,54 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+bool psci_tos_resident_on(int cpu);
+bool psci_power_state_loses_context(u32 state);
+bool psci_power_state_is_valid(u32 state);
+
+struct psci_operations {
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+ int (*cpu_off)(u32 state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
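
A sketch of a firmware-mediated CPU boot through the exported ops; the
wrapper and its arguments are hypothetical (real callers pass the physical
address of their secondary-entry trampoline):

#include <linux/errno.h>
#include <linux/psci.h>

static int demo_boot_secondary(unsigned long cpuid, unsigned long entry_pa)
{
	if (!psci_ops.cpu_on)
		return -EOPNOTSUPP;

	return psci_ops.cpu_on(cpuid, entry_pa);
}
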
diff --git a/kernel/include/linux/pstore.h b/kernel/include/linux/pstore.h
index 8e7a25b06..831479f8d 100644
--- a/kernel/include/linux/pstore.h
+++ b/kernel/include/linux/pstore.h
@@ -75,20 +75,8 @@ struct pstore_info {
#define PSTORE_FLAGS_FRAGILE 1
-#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
+extern void pstore_unregister(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
-#else
-static inline int
-pstore_register(struct pstore_info *psi)
-{
- return -ENODEV;
-}
-static inline bool
-pstore_cannot_block_path(enum kmsg_dump_reason reason)
-{
- return false;
-}
-#endif
#endif /*_LINUX_PSTORE_H*/
diff --git a/kernel/include/linux/ptp_classify.h b/kernel/include/linux/ptp_classify.h
index 159c987b1..a079656b6 100644
--- a/kernel/include/linux/ptp_classify.h
+++ b/kernel/include/linux/ptp_classify.h
@@ -32,9 +32,9 @@
#define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */
#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */
#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */
-#define PTP_CLASS_L2 0x30 /* event in a L2 packet */
-#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */
-#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged packet */
+#define PTP_CLASS_L2 0x40 /* event in an L2 packet */
+#define PTP_CLASS_PMASK 0x70 /* mask for the packet type field */
+#define PTP_CLASS_VLAN 0x80 /* event in a VLAN tagged packet */
#define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
@@ -42,6 +42,7 @@
#define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6)
#define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2)
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
+#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6)
#define PTP_EV_PORT 319
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
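
With PTP_CLASS_PMASK widened to 0x70, the transport field no longer
overlaps the VLAN flag, so both can be tested independently; a sketch,
assuming @type comes from a classifier such as ptp_classify_raw(), with
hypothetical helper names:

static bool demo_is_l4_event(unsigned int type)
{
	return (type & PTP_CLASS_PMASK) & PTP_CLASS_L4;
}

static bool demo_is_vlan_event(unsigned int type)
{
	return type & PTP_CLASS_VLAN;
}
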
diff --git a/kernel/include/linux/ptrace.h b/kernel/include/linux/ptrace.h
index 987a73a40..504c98a27 100644
--- a/kernel/include/linux/ptrace.h
+++ b/kernel/include/linux/ptrace.h
@@ -34,6 +34,7 @@
#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
+#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT 31
@@ -56,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
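
A sketch of the two credential flavours the kernel-doc above describes;
the wrapper is hypothetical:

#include <linux/errno.h>
#include <linux/ptrace.h>

static int demo_check_access(struct task_struct *task, bool via_fs)
{
	/* filesystem-style callers use fsuid, explicit syscalls real creds */
	unsigned int mode = via_fs ? PTRACE_MODE_READ_FSCREDS
				   : PTRACE_MODE_READ_REALCREDS;

	return ptrace_may_access(task, mode) ? 0 : -EACCES;
}
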
static inline int ptrace_reparented(struct task_struct *child)
diff --git a/kernel/include/linux/pwm.h b/kernel/include/linux/pwm.h
index e90628cac..cfc3ed46c 100644
--- a/kernel/include/linux/pwm.h
+++ b/kernel/include/linux/pwm.h
@@ -2,6 +2,7 @@
#define __LINUX_PWM_H
#include <linux/err.h>
+#include <linux/mutex.h>
#include <linux/of.h>
struct pwm_device;
@@ -79,26 +80,45 @@ enum {
PWMF_EXPORTED = 1 << 2,
};
+/**
+ * struct pwm_device - PWM channel object
+ * @label: name of the PWM device
+ * @flags: flags associated with the PWM device
+ * @hwpwm: per-chip relative index of the PWM device
+ * @pwm: global index of the PWM device
+ * @chip: PWM chip providing this PWM device
+ * @chip_data: chip-private data associated with the PWM device
+ * @lock: used to serialize accesses to the PWM device where necessary
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ * @polarity: polarity of the PWM signal
+ */
struct pwm_device {
- const char *label;
- unsigned long flags;
- unsigned int hwpwm;
- unsigned int pwm;
- struct pwm_chip *chip;
- void *chip_data;
-
- unsigned int period; /* in nanoseconds */
- unsigned int duty_cycle; /* in nanoseconds */
- enum pwm_polarity polarity;
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+ struct mutex lock;
+
+ unsigned int period;
+ unsigned int duty_cycle;
+ enum pwm_polarity polarity;
};
+static inline bool pwm_is_enabled(const struct pwm_device *pwm)
+{
+ return test_bit(PWMF_ENABLED, &pwm->flags);
+}
+
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
pwm->period = period;
}
-static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
return pwm ? pwm->period : 0;
}
@@ -109,7 +129,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
pwm->duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
return pwm ? pwm->duty_cycle : 0;
}
@@ -119,6 +139,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
+{
+ return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+}
+
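A sketch tying the new accessors together; the pwm pointer is assumed to
come from pwm_get()/devm_pwm_get(), and the 50% duty cycle is arbitrary:

static int demo_setup_pwm(struct pwm_device *pwm)
{
	unsigned int period = pwm_get_period(pwm);	/* ns */
	int ret;

	/* pwm_config() programs the hardware and updates the cached fields */
	ret = pwm_config(pwm, period / 2, period);
	if (ret)
		return ret;

	if (pwm_get_polarity(pwm) != PWM_POLARITY_NORMAL) {
		ret = pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
		if (ret)
			return ret;
	}

	return pwm_is_enabled(pwm) ? 0 : pwm_enable(pwm);
}
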
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
@@ -131,25 +156,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
- int (*request)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*free)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- int (*config)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
+ int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
#ifdef CONFIG_DEBUG_FS
- void (*dbg_show)(struct pwm_chip *chip,
- struct seq_file *s);
+ void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
- struct module *owner;
+ struct module *owner;
};
/**
@@ -160,28 +178,32 @@ struct pwm_ops {
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
* @pwms: array of PWM devices allocated by the framework
+ * @of_xlate: request a PWM device given a device tree PWM specifier
+ * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
* @can_sleep: must be true if the .config(), .enable() or .disable()
* operations may sleep
*/
struct pwm_chip {
- struct device *dev;
- struct list_head list;
- const struct pwm_ops *ops;
- int base;
- unsigned int npwm;
-
- struct pwm_device *pwms;
-
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
- const struct of_phandle_args *args);
- unsigned int of_pwm_n_cells;
- bool can_sleep;
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+ unsigned int of_pwm_n_cells;
+ bool can_sleep;
};
#if IS_ENABLED(CONFIG_PWM)
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
+int pwmchip_add_with_polarity(struct pwm_chip *chip,
+ enum pwm_polarity polarity);
int pwmchip_add(struct pwm_chip *chip);
int pwmchip_remove(struct pwm_chip *chip);
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
@@ -217,6 +239,11 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
+static inline int pwmchip_add_with_polarity(struct pwm_chip *chip,
+					     enum pwm_polarity polarity)
+{
+	return -EINVAL;
+}
+
static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
@@ -290,10 +317,15 @@ struct pwm_lookup {
#if IS_ENABLED(CONFIG_PWM)
void pwm_add_table(struct pwm_lookup *table, size_t num);
+void pwm_remove_table(struct pwm_lookup *table, size_t num);
#else
static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
{
}
+
+static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
+{
+}
#endif
#ifdef CONFIG_PWM_SYSFS
diff --git a/kernel/include/linux/pxa2xx_ssp.h b/kernel/include/linux/pxa2xx_ssp.h
index dab545bb6..c2f2574ff 100644
--- a/kernel/include/linux/pxa2xx_ssp.h
+++ b/kernel/include/linux/pxa2xx_ssp.h
@@ -194,8 +194,11 @@ enum pxa_ssp_type {
PXA168_SSP,
PXA910_SSP,
CE4100_SSP,
- LPSS_SSP,
QUARK_X1000_SSP,
+ LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_BYT_SSP,
+ LPSS_SPT_SSP,
+ LPSS_BXT_SSP,
};
struct ssp_device {
diff --git a/kernel/include/linux/qcom_scm.h b/kernel/include/linux/qcom_scm.h
index d7a974d5f..9e1200091 100644
--- a/kernel/include/linux/qcom_scm.h
+++ b/kernel/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -16,6 +16,19 @@
extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
+
+struct qcom_scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+extern bool qcom_scm_is_available(void);
+
+extern bool qcom_scm_hdcp_available(void);
+extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+ u32 *resp);
+
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
diff --git a/kernel/include/linux/qed/common_hsi.h b/kernel/include/linux/qed/common_hsi.h
new file mode 100644
index 000000000..1d1ba2c5e
--- /dev/null
+++ b/kernel/include/linux/qed/common_hsi.h
@@ -0,0 +1,609 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 4
+#define FW_REVISION_VERSION 2
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_BB (256)
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2 (4)
+#define NUM_OF_PHYS_TCS (8)
+
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO (8)
+
+#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2 512
+#define MAX_QM_TX_QUEUES_BB 448
+#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE 0x200
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH 16
+#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH 24
+#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH 32
+#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH 32
+#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX 0
+#define CAU_FSM_ETH_TX 1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB 12
+
+#define CAU_HC_STOPPED_STATE 3
+#define CAU_HC_DISABLE_STATE 4
+#define CAU_HC_ENABLE_STATE 0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_TOT_SB_PER_PATH \
+ MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE 0x0600
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS 12
+#define PXP_PER_PF_ENTRY_SIZE 8
+#define PXP_NUM_GLOBAL_WINDOWS 243
+#define PXP_GLOBAL_ENTRY_SIZE 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_PF_WINDOW_ADMIN_START 0
+#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
+#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
+ PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
+ PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+ (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+ PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
+#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+ PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE */
+struct async_data {
+ __le32 cid;
+ __le16 itid;
+ u8 error_code;
+ u8 fw_debug_param;
+};
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Event Data Union */
+union event_ring_data {
+ u8 bytes[8];
+ struct async_data async_info;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ __le16 reserved0;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
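
The MASK/SHIFT pairs throughout this file describe sub-word fields; a
sketch of the kind of generic accessors a consumer would pair with them
(the DEMO_* names are hypothetical; the mask is applied after shifting on
get and before shifting on set):

#define DEMO_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define DEMO_SET_FIELD(value, name, val)				\
	((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |	\
		   (((val) & (name##_MASK)) << (name##_SHIFT)))

/* e.g.: DEMO_SET_FIELD(entry->flags, EVENT_RING_ENTRY_ASYNC, 1); */
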
+
+/* Multi function mode */
+enum mf_mode {
+ SF,
+ MF_OVLAN,
+ MF_NPAR,
+ MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+ PROTOCOLID_RESERVED1,
+ PROTOCOLID_RESERVED2,
+ PROTOCOLID_RESERVED3,
+ PROTOCOLID_CORE,
+ PROTOCOLID_ETH,
+ PROTOCOLID_RESERVED4,
+ PROTOCOLID_RESERVED5,
+ PROTOCOLID_PREROCE,
+ PROTOCOLID_COMMON,
+ PROTOCOLID_RESERVED6,
+ MAX_PROTOCOL_TYPE
+};
+
+/* status block structure */
+struct cau_pi_entry {
+ u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+ u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
+};
+
+/* core doorbell data */
+struct core_db_data {
+ u8 params;
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+ DB_AGG_CMD_NOP,
+ DB_AGG_CMD_SET,
+ DB_AGG_CMD_ADD,
+ DB_AGG_CMD_MAX,
+ MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+ DB_DEST_XCM,
+ DB_DEST_UCM,
+ DB_DEST_TCM,
+ DB_NUM_DESTINATIONS,
+ MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+ __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+ IGU_INT_ENABLE = 0,
+ IGU_INT_DISABLE = 1,
+ IGU_INT_NOP = 2,
+ IGU_INT_NOP2 = 3,
+ MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+ u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_REG = 0,
+ IGU_SEG_ACCESS_ATTN = 1,
+ MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+ __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+ __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
+};
+
+struct pxp_pretend_concrete_fid {
+ __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+};
+
+union pxp_pretend_fid {
+ struct pxp_pretend_concrete_fid concrete_fid;
+ __le16 opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
+ __le32 sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+};
+
+#endif /* __COMMON_HSI__ */
diff --git a/kernel/include/linux/qed/eth_common.h b/kernel/include/linux/qed/eth_common.h
new file mode 100644
index 000000000..320b3373a
--- /dev/null
+++ b/kernel/include/linux/qed/eth_common.h
@@ -0,0 +1,279 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE 64
+
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+#define ETH_RX_NUM_NEXT_PAGE_SGES 2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+
+#define ETH_REG_CQE_PBL_SIZE 3
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+
+/* ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_RSS_ENGINE_NUM_K2 207
+#define ETH_RSS_ENGINE_NUM_BB 127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_SGL_SIZE 3
+#define ETH_TPA_CQE_CONT_SGL_SIZE 6
+#define ETH_TPA_CQE_END_SGL_SIZE 4
+
+/* Queue Zone sizes */
+#define TSTORM_QZONE_SIZE 0
+#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
+#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
+#define XSTORM_QZONE_SIZE 0
+#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
+#define PSTORM_QZONE_SIZE 0
+
+/* Interrupt coalescing TimeSet */
+struct coalescing_timeset {
+ u8 timeset;
+ u8 valid;
+};
+
+struct eth_tx_1st_bd_flags {
+ u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+};
+
+/* The parsing information data for the first tx bd of a given packet. */
+struct eth_tx_data_1st_bd {
+ __le16 vlan;
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 fw_use_only;
+};
+
+/* The parsing information data for the second tx bd of a given packet. */
+struct eth_tx_data_2nd_bd {
+ __le16 tunn_ip_size;
+ __le16 bitfields;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+ __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15
+};
+
+/* Regular ETH Rx FP CQE. */
+struct eth_fast_path_rx_reg_cqe {
+ u8 type;
+ u8 bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+ __le16 pkt_len;
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_bd;
+ u8 placement_offset;
+ u8 reserved;
+ __le16 pbl[ETH_REG_CQE_PBL_SIZE];
+ u8 reserved1[10];
+};
+
+/* The L4 pseudo checksum mode for Ethernet */
+enum eth_l4_pseudo_checksum_mode {
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+ struct regpair addr;
+};
+
+/* regular ETH Rx SP CQE */
+struct eth_slow_path_rx_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[27];
+ __le16 echo;
+};
+
+/* union for all ETH Rx CQE types */
+union eth_rx_cqe {
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_slow_path_rx_cqe slow_path;
+};
+
+/* ETH Rx CQE type */
+enum eth_rx_cqe_type {
+ ETH_RX_CQE_TYPE_UNUSED,
+ ETH_RX_CQE_TYPE_REGULAR,
+ ETH_RX_CQE_TYPE_SLOW_PATH,
+ MAX_ETH_RX_CQE_TYPE
+};
+
+/* ETH Rx producers data */
+struct eth_rx_prod_data {
+ __le16 bd_prod;
+ __le16 sge_prod;
+ __le16 cqe_prod;
+ __le16 reserved;
+};
+
+/* The first tx bd of a given packet */
+struct eth_tx_1st_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_1st_bd data;
+};
+
+/* The second tx bd of a given packet */
+struct eth_tx_2nd_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_2nd_bd data;
+};
+
+/* The parsing information data for the third tx bd of a given packet. */
+struct eth_tx_data_3rd_bd {
+ __le16 lso_mss;
+ u8 bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+ u8 resereved0[3];
+};
+
+/* The third tx bd of a given packet */
+struct eth_tx_3rd_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_3rd_bd data;
+};
+
+/* The common non-special TX BD ring element */
+struct eth_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+union eth_tx_bd_types {
+ struct eth_tx_1st_bd first_bd;
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_bd reg_bd;
+};
+
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+ struct eth_rx_prod_data rx_producers;
+ __le32 reserved[2];
+};
+
+/* Ustorm Queue Zone */
+struct ustorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ __le16 reserved[3];
+};
+
+/* Ystorm Queue Zone */
+struct ystorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ __le16 reserved[3];
+};
+
+/* ETH doorbell data */
+struct eth_db_data {
+ u8 params;
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/kernel/include/linux/qed/qed_chain.h b/kernel/include/linux/qed/qed_chain.h
new file mode 100644
index 000000000..41b9049b5
--- /dev/null
+++ b/kernel/include/linux/qed/qed_chain.h
@@ -0,0 +1,540 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CHAIN_H
+#define _QED_CHAIN_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
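
A sketch of the regpair/dma_addr_t round trip these helpers cover; the
demo functions are hypothetical:

static dma_addr_t demo_regpair_to_dma(const struct regpair *rp)
{
	return HILO_DMA_REGPAIR((*rp));
}

static void demo_dma_to_regpair(struct regpair *rp, dma_addr_t addr)
{
	rp->lo = DMA_LO_LE(addr);	/* low 32 bits, little-endian */
	rp->hi = DMA_HI_LE(addr);	/* high 32 bits, little-endian */
}
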
+
+enum qed_chain_mode {
+ /* Each Page contains a next pointer at its end */
+ QED_CHAIN_MODE_NEXT_PTR,
+
+	/* Chain is a single page; a next pointer is not required */
+ QED_CHAIN_MODE_SINGLE,
+
+ /* Page pointers are located in a side list */
+ QED_CHAIN_MODE_PBL,
+};
+
+enum qed_chain_use_mode {
+ QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+};
+
+struct qed_chain_next {
+ struct regpair next_phys;
+ void *next_virt;
+};
+
+struct qed_chain_pbl {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct qed_chain {
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+ void *p_prod_elem;
+ void *p_cons_elem;
+ u16 page_cnt;
+ enum qed_chain_mode mode;
+ enum qed_chain_use_mode intended_use; /* used to produce/consume */
+ u16 capacity; /*< number of _usable_ elements */
+ u16 size; /* number of elements */
+ u16 prod_idx;
+ u16 cons_idx;
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_unusable;
+ u16 usable_per_page;
+ u16 elem_size;
+ u16 next_page_mask;
+ struct qed_chain_pbl pbl;
+};
+
+#define QED_CHAIN_PBL_ENTRY_SIZE (8)
+#define QED_CHAIN_PAGE_SIZE (0x1000)
+#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (1 + ((sizeof(struct qed_chain_next) - 1) / \
+ (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((u32)(ELEMS_PER_PAGE(elem_size) - \
+ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
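
A worked example of the sizing math, assuming 16-byte elements and the 4K
QED_CHAIN_PAGE_SIZE above: ELEMS_PER_PAGE(16) is 256, PBL mode has no
unusable elements, so a 1024-element chain needs 4 pages:

static u32 demo_chain_pages(void)
{
	return QED_CHAIN_PAGE_CNT(1024, 16, QED_CHAIN_MODE_PBL);	/* == 4 */
}
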
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+ return p_chain->prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+ return p_chain->cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+ u16 used;
+
+	/* we don't need to truncate upon assignment, as we assign u32->u16 */
+ used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
+ (u32)p_chain->cons_idx;
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->prod_idx / p_chain->elem_per_page -
+ p_chain->cons_idx / p_chain->elem_per_page;
+
+ return p_chain->capacity - used;
+}
+
+static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+{
+ return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
+}
+
+static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
+{
+ return qed_chain_get_elem_left(p_chain) == 0;
+}
+
+static inline u16 qed_chain_get_elem_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->elem_per_page;
+}
+
+static inline u16 qed_chain_get_usable_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->elem_unusable;
+}
+
+static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+{
+ return p_chain->size;
+}
+
+static inline dma_addr_t
+qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+ return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+ void **p_next_elem,
+ u16 *idx_to_inc,
+ u16 *page_to_inc)
+{
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ {
+ struct qed_chain_next *p_next = *p_next_elem;
+ *p_next_elem = p_next->next_virt;
+ *idx_to_inc += p_chain->elem_unusable;
+ break;
+ }
+ case QED_CHAIN_MODE_SINGLE:
+ *p_next_elem = p_chain->p_virt_addr;
+ break;
+
+ case QED_CHAIN_MODE_PBL:
+ /* It is assumed pages are sequential; the next element needs
+ * to change only when wrapping from the last page back to the first.
+ */
+ if (++(*page_to_inc) == p_chain->page_cnt) {
+ *page_to_inc = 0;
+ *p_next_elem = p_chain->p_virt_addr;
+ }
+ }
+}
+
+#define is_unusable_idx(p, idx) \
+ (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_ans_skip(p, idx) \
+ do { \
+ if (is_unusable_idx(p, idx)) { \
+ (p)->idx += (p)->elem_unusable; \
+ } \
+ } while (0)
+
+/**
+ * @brief qed_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static inline void
+qed_chain_return_multi_produced(struct qed_chain *p_chain,
+ u16 num)
+{
+ p_chain->cons_idx += num;
+ test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+ p_chain->cons_idx++;
+ test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+ void *ret = NULL;
+
+ if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
+ p_chain->next_page_mask) {
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ &p_chain->prod_idx,
+ &p_chain->pbl.prod_page_idx);
+ }
+
+ ret = p_chain->p_prod_elem;
+ p_chain->prod_idx++;
+ p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+ p_chain->elem_size);
+
+ return ret;
+}
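+
+/* Typical produce-side flow (a minimal sketch; txq, its chain member and
+ * fill_tx_bd() are hypothetical driver-side names):
+ *
+ *	struct eth_tx_bd *bd;
+ *
+ *	if (qed_chain_get_elem_left(&txq->chain) < 1)
+ *		return -EBUSY;
+ *	bd = qed_chain_produce(&txq->chain);
+ *	fill_tx_bd(bd, skb);
+ *	// then ring the doorbell with qed_chain_get_prod_idx(&txq->chain)
+ */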
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of usable elements in the chain
+ *
+ * @param p_chain
+ *
+ * @return u16, the chain capacity
+ */
+static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+ return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * increments the producer so it can be written to the FW.
+ *
+ * @param p_chain
+ */
+static inline void
+qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+ test_ans_skip(p_chain, prod_idx);
+ p_chain->prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+ void *ret = NULL;
+
+ if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
+ p_chain->next_page_mask) {
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ &p_chain->cons_idx,
+ &p_chain->pbl.cons_page_idx);
+ }
+
+ ret = p_chain->p_cons_elem;
+ p_chain->cons_idx++;
+ p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+ p_chain->elem_size);
+
+ return ret;
+}
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+ int i;
+
+ p_chain->prod_idx = 0;
+ p_chain->cons_idx = 0;
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
+ p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
+ }
+
+ switch (p_chain->intended_use) {
+ case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case QED_CHAIN_USE_TO_PRODUCE:
+ /* Do nothing */
+ break;
+
+ case QED_CHAIN_USE_TO_CONSUME:
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
+ qed_chain_recycle_consumed(p_chain);
+ break;
+ }
+}
+
+/**
+ * @brief qed_chain_init - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init(struct qed_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr,
+ u16 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode)
+{
+ /* chain fixed parameters */
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+ p_chain->elem_size = elem_size;
+ p_chain->page_cnt = page_cnt;
+ p_chain->mode = mode;
+
+ p_chain->intended_use = intended_use;
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->usable_per_page =
+ USABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+
+ p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+
+ p_chain->next_page_mask = (p_chain->usable_per_page &
+ p_chain->elem_per_page_mask);
+
+ if (mode == QED_CHAIN_MODE_NEXT_PTR) {
+ struct qed_chain_next *p_next;
+ u16 i;
+
+ for (i = 0; i < page_cnt - 1; i++) {
+ /* Advance p_phys_addr to the next page. */
+ p_phys_addr += QED_CHAIN_PAGE_SIZE;
+
+ /* Initialize the physical address of the next page. */
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ elem_size * p_chain->usable_per_page);
+
+ p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
+ p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
+
+ /* Initialize the virtual address of the next page. */
+ p_next->next_virt = (void *)((u8 *)p_virt_addr +
+ QED_CHAIN_PAGE_SIZE);
+
+ /* Move to the next page. */
+ p_virt_addr = p_next->next_virt;
+ }
+
+ /* Last page's next should point to beginning of the chain */
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ elem_size *
+ p_chain->usable_per_page);
+
+ p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
+ p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
+ p_next->next_virt = p_chain->p_virt_addr;
+ }
+ qed_chain_reset(p_chain);
+}
+
+/**
+ * @brief qed_chain_pbl_init - Initializes a basic pbl chain
+ * struct
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param use_mode
+ * @param p_phys_pbl pointer to a pre-allocated side table
+ * which will hold physical page addresses.
+ * @param p_virt_pbl pointer to a pre-allocated side table
+ * which will hold virtual page addresses.
+ */
+static inline void
+qed_chain_pbl_init(struct qed_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr,
+ u16 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode use_mode,
+ dma_addr_t p_phys_pbl,
+ dma_addr_t *p_virt_pbl)
+{
+ dma_addr_t *p_pbl_dma = p_virt_pbl;
+ int i;
+
+ qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
+ elem_size, use_mode, QED_CHAIN_MODE_PBL);
+
+ p_chain->pbl.p_phys_table = p_phys_pbl;
+ p_chain->pbl.p_virt_table = p_virt_pbl;
+
+ /* Fill the PBL with physical addresses */
+ for (i = 0; i < page_cnt; i++) {
+ *p_pbl_dma = p_phys_addr;
+ p_phys_addr += QED_CHAIN_PAGE_SIZE;
+ p_pbl_dma++;
+ }
+}
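+
+/* Sizing sketch for the side table (a sketch, assuming a 64-bit dma_addr_t,
+ * which is what QED_CHAIN_PBL_ENTRY_SIZE encodes):
+ *
+ *	pbl_virt = dma_alloc_coherent(&pdev->dev,
+ *				      page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+ *				      &pbl_phys, GFP_KERNEL);
+ *	if (pbl_virt)
+ *		qed_chain_pbl_init(&chain, p_virt, p_phys, page_cnt, elem_size,
+ *				   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ *				   pbl_phys, pbl_virt);
+ */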
+
+/**
+ * @brief qed_chain_set_prod - sets the producer to the given value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ u16 prod_idx,
+ void *p_prod_elem)
+{
+ p_chain->prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_sge_get_elem -
+ *
+ * get a pointer to the element represented by an absolute idx
+ *
+ * @param p_chain
+ * @param idx
+ * @assumption p_chain->size is a power of 2
+ *
+ * @return void*, a pointer to the element at idx, or NULL if out of range
+ */
+static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
+ u16 idx)
+{
+ void *ret = NULL;
+
+ if (idx >= p_chain->size)
+ return NULL;
+
+ ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+
+ return ret;
+}
+
+/**
+ * @brief qed_chain_sge_inc_cons_prod
+ *
+ * For SGE chains the producer isn't advanced serially; the ring
+ * is expected to be full at all times. Once elements are
+ * consumed, they are immediately produced.
+ *
+ * @param p_chain
+ * @param cnt
+ */
+static inline void
+qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
+ u16 cnt)
+{
+ p_chain->prod_idx += cnt;
+ p_chain->cons_idx += cnt;
+}
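+
+/* End-to-end sketch (illustrative only, error handling omitted): allocate a
+ * physically contiguous buffer, initialize a chain over it, then produce:
+ *
+ *	void *virt;
+ *	dma_addr_t phys;
+ *	struct qed_chain chain;
+ *	u64 *elem;
+ *
+ *	virt = dma_alloc_coherent(&pdev->dev, 2 * QED_CHAIN_PAGE_SIZE,
+ *				  &phys, GFP_KERNEL);
+ *	qed_chain_init(&chain, virt, phys, 2, sizeof(u64),
+ *		       QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_NEXT_PTR);
+ *
+ *	elem = qed_chain_produce(&chain);
+ */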
+
+#endif
diff --git a/kernel/include/linux/qed/qed_eth_if.h b/kernel/include/linux/qed/qed_eth_if.h
new file mode 100644
index 000000000..81ab178e3
--- /dev/null
+++ b/kernel/include/linux/qed/qed_eth_if.h
@@ -0,0 +1,165 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ETH_IF_H
+#define _QED_ETH_IF_H
+
+#include <linux/list.h>
+#include <linux/if_link.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ u8 num_queues;
+ u8 num_tc;
+
+ u8 port_mac[ETH_ALEN];
+ u8 num_vlan_filters;
+};
+
+struct qed_update_vport_rss_params {
+ u16 rss_ind_table[128];
+ u32 rss_key[10];
+};
+
+struct qed_update_vport_params {
+ u8 vport_id;
+ u8 update_vport_active_flg;
+ u8 vport_active_flg;
+ u8 update_rss_flg;
+ struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_stop_rxq_params {
+ u8 rss_id;
+ u8 rx_queue_id;
+ u8 vport_id;
+ bool eq_completion_only;
+};
+
+struct qed_stop_txq_params {
+ u8 rss_id;
+ u8 tx_queue_id;
+};
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+ QED_FILTER_XCAST_TYPE_ADD,
+ QED_FILTER_XCAST_TYPE_DEL,
+ QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+struct qed_filter_ucast_params {
+ enum qed_filter_xcast_params_type type;
+ u8 vlan_valid;
+ u16 vlan;
+ u8 mac_valid;
+ unsigned char mac[ETH_ALEN];
+};
+
+struct qed_filter_mcast_params {
+ enum qed_filter_xcast_params_type type;
+ u8 num;
+ unsigned char mac[64][ETH_ALEN];
+};
+
+union qed_filter_type_params {
+ enum qed_filter_rx_mode_type accept_flags;
+ struct qed_filter_ucast_params ucast;
+ struct qed_filter_mcast_params mcast;
+};
+
+enum qed_filter_type {
+ QED_FILTER_TYPE_UCAST,
+ QED_FILTER_TYPE_MCAST,
+ QED_FILTER_TYPE_RX_MODE,
+ QED_MAX_FILTER_TYPES,
+};
+
+struct qed_filter_params {
+ enum qed_filter_type type;
+ union qed_filter_type_params filter;
+};
+
+struct qed_queue_start_common_params {
+ u8 rss_id;
+ u8 queue_id;
+ u8 vport_id;
+ u16 sb;
+ u16 sb_idx;
+};
+
+struct qed_eth_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops,
+ void *cookie);
+
+ int (*vport_start)(struct qed_dev *cdev,
+ u8 vport_id, u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg);
+
+ int (*vport_stop)(struct qed_dev *cdev,
+ u8 vport_id);
+
+ int (*vport_update)(struct qed_dev *cdev,
+ struct qed_update_vport_params *params);
+
+ int (*q_rx_start)(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod);
+
+ int (*q_rx_stop)(struct qed_dev *cdev,
+ struct qed_stop_rxq_params *params);
+
+ int (*q_tx_start)(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell);
+
+ int (*q_tx_stop)(struct qed_dev *cdev,
+ struct qed_stop_txq_params *params);
+
+ int (*filter_config)(struct qed_dev *cdev,
+ struct qed_filter_params *params);
+
+ int (*fastpath_stop)(struct qed_dev *cdev);
+
+ int (*eth_cqe_completion)(struct qed_dev *cdev,
+ u8 rss_id,
+ struct eth_slow_path_rx_cqe *cqe);
+
+ void (*get_vport_stats)(struct qed_dev *cdev,
+ struct qed_eth_stats *stats);
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+void qed_put_eth_ops(void);
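+
+/* Binding sketch for a protocol driver (illustrative; the version constant
+ * and the edev fields are hypothetical):
+ *
+ *	const struct qed_eth_ops *ops = qed_get_eth_ops(QEDE_ETH_VERSION);
+ *
+ *	if (!ops)
+ *		return -EINVAL;
+ *	edev->cdev = ops->common->probe(pdev, QED_PROTOCOL_ETH,
+ *					dp_module, dp_level);
+ *	ops->fill_dev_info(edev->cdev, &edev->dev_info);
+ *	...
+ *	qed_put_eth_ops();	// on teardown, after common->remove()
+ */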
+
+#endif
diff --git a/kernel/include/linux/qed/qed_if.h b/kernel/include/linux/qed/qed_if.h
new file mode 100644
index 000000000..dc9a1353f
--- /dev/null
+++ b/kernel/include/linux/qed/qed_if.h
@@ -0,0 +1,498 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IF_H
+#define _QED_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_chain.h>
+
+#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+ (void __iomem *)(reg_addr))
+
+#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+
+#define QED_COALESCE_MAX 0xFF
+
+/* forward */
+struct qed_dev;
+
+struct qed_eth_pf_params {
+ /* The following parameters are used during HW-init
+ * and need to be passed as arguments to the update_pf_params
+ * routine invoked before slowpath start
+ */
+ u16 num_cons;
+};
+
+struct qed_pf_params {
+ struct qed_eth_pf_params eth_pf_params;
+};
+
+enum qed_int_mode {
+ QED_INT_MODE_INTA,
+ QED_INT_MODE_MSIX,
+ QED_INT_MODE_MSI,
+ QED_INT_MODE_POLL,
+};
+
+struct qed_sb_info {
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void __iomem *igu_addr;
+ u8 flags;
+#define QED_SB_INFO_INIT 0x1
+#define QED_SB_INFO_SETUP 0x2
+
+ struct qed_dev *cdev;
+};
+
+struct qed_dev_info {
+ unsigned long pci_mem_start;
+ unsigned long pci_mem_end;
+ unsigned int pci_irq;
+ u8 num_hwfns;
+
+ u8 hw_mac[ETH_ALEN];
+ bool is_mf;
+
+ /* FW version */
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ /* MFW version */
+ u32 mfw_rev;
+
+ u32 flash_size;
+ u8 mf_mode;
+};
+
+enum qed_sb_type {
+ QED_SB_TYPE_L2_QUEUE,
+};
+
+enum qed_protocol {
+ QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+ bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+ u32 override_flags;
+ bool autoneg;
+ u32 adv_speeds;
+ u32 forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
+ u32 pause_config;
+};
+
+struct qed_link_output {
+ bool link_up;
+
+ u32 supported_caps; /* In SUPPORTED defs */
+ u32 advertised_caps; /* In ADVERTISED defs */
+ u32 lp_caps; /* In ADVERTISED defs */
+ u32 speed; /* In Mb/s */
+ u8 duplex; /* In DUPLEX defs */
+ u8 port; /* In PORT defs */
+ bool autoneg;
+ u32 pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 12
+struct qed_slowpath_params {
+ u32 int_mode;
+ u8 drv_major;
+ u8 drv_minor;
+ u8 drv_rev;
+ u8 drv_eng;
+ u8 name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_int_info {
+ struct msix_entry *msix;
+ u8 msix_cnt;
+
+ /* This should be updated by the protocol driver */
+ u8 used_cnt;
+};
+
+struct qed_common_cb_ops {
+ void (*link_update)(void *dev,
+ struct qed_link_output *link);
+};
+
+struct qed_common_ops {
+ struct qed_dev* (*probe)(struct pci_dev *dev,
+ enum qed_protocol protocol,
+ u32 dp_module, u8 dp_level);
+
+ void (*remove)(struct qed_dev *cdev);
+
+ int (*set_power_state)(struct qed_dev *cdev,
+ pci_power_t state);
+
+ void (*set_id)(struct qed_dev *cdev,
+ char name[],
+ char ver_str[]);
+
+ /* Client drivers need to make this call before slowpath_start.
+ * The PF params required for the call before slowpath_start are
+ * documented within the qed_pf_params structure definition.
+ */
+ void (*update_pf_params)(struct qed_dev *cdev,
+ struct qed_pf_params *params);
+ int (*slowpath_start)(struct qed_dev *cdev,
+ struct qed_slowpath_params *params);
+
+ int (*slowpath_stop)(struct qed_dev *cdev);
+
+ /* Requests to use `cnt' interrupts for fastpath.
+ * Upon success, returns the number of interrupts allocated for fastpath.
+ */
+ int (*set_fp_int)(struct qed_dev *cdev,
+ u16 cnt);
+
+ /* Fills `info' with pointers required for utilizing interrupts */
+ int (*get_fp_int)(struct qed_dev *cdev,
+ struct qed_int_info *info);
+
+ u32 (*sb_init)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ u32 (*sb_release)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+ void (*simd_handler_config)(struct qed_dev *cdev,
+ void *token,
+ int index,
+ void (*handler)(void *));
+
+ void (*simd_handler_clean)(struct qed_dev *cdev,
+ int index);
+/**
+ * @brief set_link - set links according to params
+ *
+ * @param cdev
+ * @param params - values used to override the default link configuration
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*set_link)(struct qed_dev *cdev,
+ struct qed_link_params *params);
+
+/**
+ * @brief get_link - returns the current link state.
+ *
+ * @param cdev
+ * @param if_link - structure to be filled with current link configuration.
+ */
+ void (*get_link)(struct qed_dev *cdev,
+ struct qed_link_output *if_link);
+
+/**
+ * @brief drain - drains the chip in case Tx completions fail to arrive due to pause.
+ *
+ * @param cdev
+ */
+ int (*drain)(struct qed_dev *cdev);
+
+/**
+ * @brief update_msglvl - update module debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+ void (*update_msglvl)(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+ int (*chain_alloc)(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain);
+
+ void (*chain_free)(struct qed_dev *cdev,
+ struct qed_chain *p_chain);
+};
+
+/**
+ * @brief qed_get_protocol_version
+ *
+ * @param protocol
+ *
+ * @return version supported by qed for given protocol driver
+ */
+u32 qed_get_protocol_version(enum qed_protocol protocol);
+
+#define MASK_FIELD(_name, _value) \
+ ((_value) &= (_name ## _MASK))
+
+#define FIELD_VALUE(_name, _value) \
+ ((_value & _name ## _MASK) << _name ## _SHIFT)
+
+#define SET_FIELD(value, name, flag) \
+ do { \
+ (value) &= ~(name ## _MASK << name ## _SHIFT); \
+ (value) |= (((u64)flag) << (name ## _SHIFT)); \
+ } while (0)
+
+#define GET_FIELD(value, name) \
+ (((value) >> (name ## _SHIFT)) & name ## _MASK)
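+
+/* Example (FOO_MASK/FOO_SHIFT are hypothetical field definitions):
+ *
+ *	#define FOO_MASK	0xF
+ *	#define FOO_SHIFT	4
+ *
+ *	u32 reg = 0;
+ *
+ *	SET_FIELD(reg, FOO, 0x5);	// reg == 0x50
+ *	GET_FIELD(reg, FOO);		// yields 0x5
+ */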
+
+/* Debug print definitions */
+#define DP_ERR(cdev, fmt, ...) \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__)
+
+#define DP_NOTICE(cdev, fmt, ...) \
+ do { \
+ if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ \
+ } \
+ } while (0)
+
+#define DP_INFO(cdev, fmt, ...) \
+ do { \
+ if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define DP_VERBOSE(cdev, module, fmt, ...) \
+ do { \
+ if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
+ ((cdev)->dp_module & module))) { \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ } \
+ } while (0)
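+
+/* Usage sketch: a verbose print is emitted only when both the verbose level
+ * and the matching module bit (see enum DP_MODULE below) are enabled:
+ *
+ *	DP_VERBOSE(cdev, QED_MSG_SPQ, "SPQ entry %d posted\n", idx);
+ */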
+
+enum DP_LEVEL {
+ QED_LEVEL_VERBOSE = 0x0,
+ QED_LEVEL_INFO = 0x1,
+ QED_LEVEL_NOTICE = 0x2,
+ QED_LEVEL_ERR = 0x3,
+};
+
+#define QED_LOG_LEVEL_SHIFT (30)
+#define QED_LOG_VERBOSE_MASK (0x3fffffff)
+#define QED_LOG_INFO_MASK (0x40000000)
+#define QED_LOG_NOTICE_MASK (0x80000000)
+
+enum DP_MODULE {
+ QED_MSG_SPQ = 0x10000,
+ QED_MSG_STATS = 0x20000,
+ QED_MSG_DCB = 0x40000,
+ QED_MSG_IOV = 0x80000,
+ QED_MSG_SP = 0x100000,
+ QED_MSG_STORAGE = 0x200000,
+ QED_MSG_CXT = 0x800000,
+ QED_MSG_ILT = 0x2000000,
+ QED_MSG_ROCE = 0x4000000,
+ QED_MSG_DEBUG = 0x8000000,
+ /* more to be added as needed */
+};
+
+struct qed_eth_stats {
+ u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 tpa_coalesced_pkts;
+ u64 tpa_coalesced_events;
+ u64 tpa_aborts_num;
+ u64 tpa_not_coalesced_pkts;
+ u64 tpa_coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_127_byte_packets;
+ u64 rx_255_byte_packets;
+ u64 rx_511_byte_packets;
+ u64 rx_1023_byte_packets;
+ u64 rx_1518_byte_packets;
+ u64 rx_1522_byte_packets;
+ u64 rx_2047_byte_packets;
+ u64 rx_4095_byte_packets;
+ u64 rx_9216_byte_packets;
+ u64 rx_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 rx_mac_bytes;
+ u64 rx_mac_uc_packets;
+ u64 rx_mac_mc_packets;
+ u64 rx_mac_bc_packets;
+ u64 rx_mac_frames_ok;
+ u64 tx_mac_bytes;
+ u64 tx_mac_uc_packets;
+ u64 tx_mac_mc_packets;
+ u64 tx_mac_bc_packets;
+ u64 tx_mac_ctrl_frames;
+};
+
+#define QED_SB_IDX 0x0002
+
+#define RX_PI 0
+#define TX_PI(tc) (RX_PI + 1 + (tc))
+
+static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
+{
+ u32 prod = 0;
+ u16 rc = 0;
+
+ prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
+ STATUS_BLOCK_PROD_INDEX_MASK;
+ if (sb_info->sb_ack != prod) {
+ sb_info->sb_ack = prod;
+ rc |= QED_SB_IDX;
+ }
+
+ /* Let SB update */
+ mmiowb();
+ return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ * written to the IGU.
+ *
+ * @param sb_info - This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init
+ * @param int_cmd - Enable/Disable/Nop
+ * @param upd_flg - whether igu consumer should be
+ * updated.
+ */
+static inline void qed_sb_ack(struct qed_sb_info *sb_info,
+ enum igu_int_cmd int_cmd,
+ u8 upd_flg)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+
+ /* Both segments (interrupts & acks) are written to the same address;
+ * need to guarantee all commands will be received (in-order) by the HW.
+ */
+ mmiowb();
+ barrier();
+}
+
+static inline void __internal_ram_wr(void *p_hwfn,
+ void __iomem *addr,
+ int size,
+ u32 *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
+}
+
+static inline void internal_ram_wr(void __iomem *addr,
+ int size,
+ u32 *data)
+{
+ __internal_ram_wr(NULL, addr, size, data);
+}
+
+#endif
diff --git a/kernel/include/linux/quotaops.h b/kernel/include/linux/quotaops.h
index 77ca6601f..7a57c28eb 100644
--- a/kernel/include/linux/quotaops.h
+++ b/kernel/include/linux/quotaops.h
@@ -43,7 +43,7 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number);
void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
return 0;
}
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
{
+ return 0;
}
static inline void dquot_drop(struct inode *inode)
diff --git a/kernel/include/linux/radix-tree.h b/kernel/include/linux/radix-tree.h
index 933ff6977..8ddbd6e15 100644
--- a/kernel/include/linux/radix-tree.h
+++ b/kernel/include/linux/radix-tree.h
@@ -375,12 +375,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index;
+ return NULL;
+}
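+
+/*
+ * Example of the intended pattern (a sketch):
+ *
+ *	radix_tree_for_each_slot(slot, root, &iter, 0) {
+ *		void *entry = radix_tree_deref_slot(slot);
+ *		if (radix_tree_deref_retry(entry)) {
+ *			slot = radix_tree_iter_retry(&iter);
+ *			continue;
+ *		}
+ *		...
+ *	}
+ */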
+
+/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
-static __always_inline unsigned
+static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
@@ -414,9 +430,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return slot + offset + 1;
}
} else {
- unsigned size = radix_tree_chunk_size(iter) - 1;
+ long size = radix_tree_chunk_size(iter);
- while (size--) {
+ while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))
diff --git a/kernel/include/linux/random.h b/kernel/include/linux/random.h
index 4a64ad52d..1a8043616 100644
--- a/kernel/include/linux/random.h
+++ b/kernel/include/linux/random.h
@@ -6,14 +6,25 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/list.h>
+#include <linux/once.h>
+
#include <uapi/linux/random.h>
+struct random_ready_callback {
+ struct list_head list;
+ void (*func)(struct random_ready_callback *rdy);
+ struct module *owner;
+};
+
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
extern void get_random_bytes(void *buf, int nbytes);
+extern int add_random_ready_callback(struct random_ready_callback *rdy);
+extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);
@@ -36,6 +47,10 @@ struct rnd_state {
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+ DO_ONCE(prandom_seed_full_state, (pcpu_state))
/**
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/kernel/include/linux/rbtree.h b/kernel/include/linux/rbtree.h
index fb31765e9..24ddffd25 100644
--- a/kernel/include/linux/rbtree.h
+++ b/kernel/include/linux/rbtree.h
@@ -73,11 +73,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
-static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
- struct rb_node ** rb_link)
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
@@ -85,19 +85,30 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node;
}
+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link);
+
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/**
- * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
- * given type safe against removal of rb_node entry
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
+ * given type allowing the backing memory of @pos to be invalidated
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
+ *
+ * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
+ * list_for_each_entry_safe() and allows the iteration to continue independent
+ * of changes to @pos by the body of the loop.
+ *
+ * Note, however, that it cannot handle other modifications that re-order the
+ * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
+ * rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
diff --git a/kernel/include/linux/rbtree_augmented.h b/kernel/include/linux/rbtree_augmented.h
index 378c5ee75..14d7b831b 100644
--- a/kernel/include/linux/rbtree_augmented.h
+++ b/kernel/include/linux/rbtree_augmented.h
@@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
if (parent) {
if (parent->rb_left == old)
- parent->rb_left = new;
+ WRITE_ONCE(parent->rb_left, new);
else
- parent->rb_right = new;
+ WRITE_ONCE(parent->rb_right, new);
} else
- root->rb_node = new;
+ WRITE_ONCE(root->rb_node, new);
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -137,7 +137,8 @@ static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *child = node->rb_right;
+ struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
@@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
+
tmp = child->rb_left;
if (!tmp) {
/*
@@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
*/
parent = successor;
child2 = successor->rb_right;
+
augment->copy(node, successor);
} else {
/*
@@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
- parent->rb_left = child2 = successor->rb_right;
- successor->rb_right = child;
+ child2 = successor->rb_right;
+ WRITE_ONCE(parent->rb_left, child2);
+ WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
+
augment->copy(node, successor);
augment->propagate(parent, successor);
}
- successor->rb_left = tmp = node->rb_left;
+ tmp = node->rb_left;
+ WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
+
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
diff --git a/kernel/include/linux/rbtree_latch.h b/kernel/include/linux/rbtree_latch.h
new file mode 100644
index 000000000..4f3432c61
--- /dev/null
+++ b/kernel/include/linux/rbtree_latch.h
@@ -0,0 +1,212 @@
+/*
+ * Latched RB-trees
+ *
+ * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
+ *
+ * Since RB-trees have non-atomic modifications they're not immediately suited
+ * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
+ * lockless lookups, we cannot guarantee they return a correct result.
+ *
+ * The simplest solution is a seqlock + RB-tree, this will allow lockless
+ * lookups; but has the constraint (inherent to the seqlock) that read sides
+ * cannot nest in write sides.
+ *
+ * If we need to allow unconditional lookups (say as required for NMI context
+ * usage) we need a more complex setup; this data structure provides this by
+ * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * implement a latched RB-tree which does allow for unconditional lookups by
+ * virtue of always having (at least) one stable copy of the tree.
+ *
+ * However, while we have the guarantee that there is at all times one stable
+ * copy, this does not guarantee an iteration will not observe modifications.
+ * What might have been a stable copy at the start of the iteration need not
+ * remain so for the duration of the iteration.
+ *
+ * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
+ * see the comment in lib/rbtree.c. Note however that we only require the first
+ * condition -- not seeing partial stores -- because the latch technique
+ * isolates us from loops. If we were to interrupt a modification, the lookup
+ * would be pointed at the stable tree and complete while the modification
+ * was halted.
+ */
+
+#ifndef RB_TREE_LATCH_H
+#define RB_TREE_LATCH_H
+
+#include <linux/rbtree.h>
+#include <linux/seqlock.h>
+
+struct latch_tree_node {
+ struct rb_node node[2];
+};
+
+struct latch_tree_root {
+ seqcount_t seq;
+ struct rb_root tree[2];
+};
+
+/**
+ * latch_tree_ops - operators to define the tree order
+ * @less: used for insertion; provides the (partial) order between two elements.
+ * @comp: used for lookups; provides the order between the search key and an element.
+ *
+ * The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * latch_tree_find().
+ */
+struct latch_tree_ops {
+ bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
+ int (*comp)(void *key, struct latch_tree_node *b);
+};
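+
+/*
+ * Example operators for entries ordered by a u64 key (a sketch; the struct
+ * and function names are illustrative):
+ *
+ *	struct my_node {
+ *		u64 key;
+ *		struct latch_tree_node lt;
+ *	};
+ *
+ *	static bool my_less(struct latch_tree_node *a, struct latch_tree_node *b)
+ *	{
+ *		return container_of(a, struct my_node, lt)->key <
+ *		       container_of(b, struct my_node, lt)->key;
+ *	}
+ *
+ *	static int my_comp(void *key, struct latch_tree_node *b)
+ *	{
+ *		u64 k = *(u64 *)key;
+ *		u64 bk = container_of(b, struct my_node, lt)->key;
+ *
+ *		return k < bk ? -1 : (k > bk ? 1 : 0);
+ *	}
+ *
+ *	static const struct latch_tree_ops my_ops = {
+ *		.less = my_less,
+ *		.comp = my_comp,
+ *	};
+ */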
+
+static __always_inline struct latch_tree_node *
+__lt_from_rb(struct rb_node *node, int idx)
+{
+ return container_of(node, struct latch_tree_node, node[idx]);
+}
+
+static __always_inline void
+__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
+ bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
+{
+ struct rb_root *root = &ltr->tree[idx];
+ struct rb_node **link = &root->rb_node;
+ struct rb_node *node = &ltn->node[idx];
+ struct rb_node *parent = NULL;
+ struct latch_tree_node *ltp;
+
+ while (*link) {
+ parent = *link;
+ ltp = __lt_from_rb(parent, idx);
+
+ if (less(ltn, ltp))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node_rcu(node, parent, link);
+ rb_insert_color(node, root);
+}
+
+static __always_inline void
+__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
+{
+ rb_erase(&ltn->node[idx], &ltr->tree[idx]);
+}
+
+static __always_inline struct latch_tree_node *
+__lt_find(void *key, struct latch_tree_root *ltr, int idx,
+ int (*comp)(void *key, struct latch_tree_node *node))
+{
+ struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
+ struct latch_tree_node *ltn;
+ int c;
+
+ while (node) {
+ ltn = __lt_from_rb(node, idx);
+ c = comp(key, ltn);
+
+ if (c < 0)
+ node = rcu_dereference_raw(node->rb_left);
+ else if (c > 0)
+ node = rcu_dereference_raw(node->rb_right);
+ else
+ return ltn;
+ }
+
+ return NULL;
+}
+
+/**
+ * latch_tree_insert() - insert @node into the trees @root
+ * @node: node to insert
+ * @root: trees to insert @node into
+ * @ops: operators defining the node order
+ *
+ * It inserts @node into @root in an ordered fashion such that we can always
+ * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ *
+ * The inserts use rcu_assign_pointer() to publish the element such that the
+ * tree structure is stored before we can observe the new @node.
+ *
+ * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
+ * serialized.
+ */
+static __always_inline void
+latch_tree_insert(struct latch_tree_node *node,
+ struct latch_tree_root *root,
+ const struct latch_tree_ops *ops)
+{
+ raw_write_seqcount_latch(&root->seq);
+ __lt_insert(node, root, 0, ops->less);
+ raw_write_seqcount_latch(&root->seq);
+ __lt_insert(node, root, 1, ops->less);
+}
+
+/**
+ * latch_tree_erase() - removes @node from the trees @root
+ * @node: node to remove
+ * @root: trees to remove @node from
+ * @ops: operators defining the node order
+ *
+ * Removes @node from the trees @root in an ordered fashion such that we can
+ * always observe one complete tree. See the comment for
+ * raw_write_seqcount_latch().
+ *
+ * It is assumed that @node will observe one RCU quiescent state before being
+ * reused or freed.
+ *
+ * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
+ * serialized.
+ */
+static __always_inline void
+latch_tree_erase(struct latch_tree_node *node,
+ struct latch_tree_root *root,
+ const struct latch_tree_ops *ops)
+{
+ raw_write_seqcount_latch(&root->seq);
+ __lt_erase(node, root, 0);
+ raw_write_seqcount_latch(&root->seq);
+ __lt_erase(node, root, 1);
+}
+
+/**
+ * latch_tree_find() - find the node matching @key in the trees @root
+ * @key: search key
+ * @root: trees to search for @key
+ * @ops: operators defining the node order
+ *
+ * Does a lockless lookup in the trees @root for the node matching @key.
+ *
+ * It is assumed that this is called while holding the appropriate RCU read
+ * side lock.
+ *
+ * If the operators define a partial order on the elements (there are multiple
+ * elements which have the same key value) it is undefined which of these
+ * elements will be found. Nor is it possible to iterate the tree to find
+ * further elements with the same key value.
+ *
+ * Returns: a pointer to the node matching @key or NULL.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_find(void *key, struct latch_tree_root *root,
+ const struct latch_tree_ops *ops)
+{
+ struct latch_tree_node *node;
+ unsigned int seq;
+
+ do {
+ seq = raw_read_seqcount_latch(&root->seq);
+ node = __lt_find(key, root, seq & 1, ops->comp);
+ } while (read_seqcount_retry(&root->seq, seq));
+
+ return node;
+}
+
+#endif /* RB_TREE_LATCH_H */
diff --git a/kernel/include/linux/rcu_sync.h b/kernel/include/linux/rcu_sync.h
new file mode 100644
index 000000000..a63a33e61
--- /dev/null
+++ b/kernel/include/linux/rcu_sync.h
@@ -0,0 +1,86 @@
+/*
+ * RCU-based infrastructure for lightweight reader-writer locking
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (c) 2015, Red Hat, Inc.
+ *
+ * Author: Oleg Nesterov <oleg@redhat.com>
+ */
+
+#ifndef _LINUX_RCU_SYNC_H_
+#define _LINUX_RCU_SYNC_H_
+
+#include <linux/wait.h>
+#include <linux/rcupdate.h>
+
+enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
+
+/* Structure to mediate between updaters and fastpath-using readers. */
+struct rcu_sync {
+ int gp_state;
+ int gp_count;
+ wait_queue_head_t gp_wait;
+
+ int cb_state;
+ struct rcu_head cb_head;
+
+ enum rcu_sync_type gp_type;
+};
+
+extern void rcu_sync_lockdep_assert(struct rcu_sync *);
+
+/**
+ * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
+ * Returns true if readers are permitted to use their fastpaths.
+ * Must be invoked within an RCU read-side critical section whose
+ * flavor matches that of the rcu_sync structure.
+ */
+static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
+{
+#ifdef CONFIG_PROVE_RCU
+ rcu_sync_lockdep_assert(rsp);
+#endif
+ return !rsp->gp_state; /* GP_IDLE */
+}
+
+extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter(struct rcu_sync *);
+extern void rcu_sync_exit(struct rcu_sync *);
+extern void rcu_sync_dtor(struct rcu_sync *);
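+
+/*
+ * Usage sketch (illustrative): readers pair rcu_sync_is_idle() with the
+ * matching RCU read-side lock; writers bracket the slow period with
+ * rcu_sync_enter()/rcu_sync_exit():
+ *
+ *	static struct rcu_sync rss;	// rcu_sync_init(&rss, RCU_SYNC)
+ *
+ *	// reader
+ *	rcu_read_lock();
+ *	if (rcu_sync_is_idle(&rss))
+ *		fast_path();		// hypothetical
+ *	else
+ *		slow_path();		// hypothetical
+ *	rcu_read_unlock();
+ *
+ *	// writer
+ *	rcu_sync_enter(&rss);		// forces readers onto slow_path()
+ *	...
+ *	rcu_sync_exit(&rss);
+ */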
+
+#define __RCU_SYNC_INITIALIZER(name, type) { \
+ .gp_state = 0, \
+ .gp_count = 0, \
+ .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
+ .cb_state = 0, \
+ .gp_type = type, \
+ }
+
+#define __DEFINE_RCU_SYNC(name, type) \
+ struct rcu_sync name = __RCU_SYNC_INITIALIZER(name, type)
+
+#define DEFINE_RCU_SYNC(name) \
+ __DEFINE_RCU_SYNC(name, RCU_SYNC)
+
+#define DEFINE_RCU_SCHED_SYNC(name) \
+ __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
+
+#define DEFINE_RCU_BH_SYNC(name) \
+ __DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+
+#endif /* _LINUX_RCU_SYNC_H_ */
diff --git a/kernel/include/linux/rculist.h b/kernel/include/linux/rculist.h
index a18b16f1d..5ed540986 100644
--- a/kernel/include/linux/rculist.h
+++ b/kernel/include/linux/rculist.h
@@ -29,8 +29,8 @@
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
- ACCESS_ONCE(list->next) = list;
- ACCESS_ONCE(list->prev) = list;
+ WRITE_ONCE(list->next, list);
+ WRITE_ONCE(list->prev, list);
}
/*
@@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
-({ \
- typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
- container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
-})
+ container_of(lockless_dereference(ptr), type, member)
/**
* Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -288,7 +285,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
- struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
@@ -549,8 +546,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
- pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
diff --git a/kernel/include/linux/rcupdate.h b/kernel/include/linux/rcupdate.h
index 5d090cdaa..c2f5f9551 100644
--- a/kernel/include/linux/rcupdate.h
+++ b/kernel/include/linux/rcupdate.h
@@ -44,6 +44,8 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/ktime.h>
+
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
@@ -158,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
* more than one CPU).
*/
void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+ rcu_callback_t func);
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -192,7 +194,7 @@ void call_rcu(struct rcu_head *head,
* memory ordering guarantees.
*/
void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+ rcu_callback_t func);
#endif
/**
@@ -215,7 +217,7 @@ void call_rcu_bh(struct rcu_head *head,
* memory ordering guarantees.
*/
void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
+ rcu_callback_t func);
void synchronize_sched(void);
@@ -228,6 +230,36 @@ struct rcu_synchronize {
};
void wakeme_after_rcu(struct rcu_head *head);
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+ struct rcu_synchronize *rs_array);
+
+#define _wait_rcu_gp(checktiny, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
+ __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
+ __crcu_array, __rs_array); \
+} while (0)
+
+#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for the flavors to wait on.
+ *
+ * This macro waits concurrently for multiple flavors of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
+ * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, supplying the corresponding srcu_struct.
+ *
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
+ * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * is automatically a grace period.
+ */
+#define synchronize_rcu_mult(...) \
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
/**
* call_rcu_tasks() - Queue an RCU for invocation task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -246,7 +278,7 @@ void wakeme_after_rcu(struct rcu_head *head);
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
@@ -274,12 +306,14 @@ static inline int sched_rcu_preempt_depth(void) { return 0; }
static inline void __rcu_read_lock(void)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ preempt_enable();
}
static inline void synchronize_rcu(void)
@@ -303,10 +337,6 @@ void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
@@ -322,7 +352,7 @@ static inline void rcu_sysrq_end(void)
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
@@ -330,7 +360,7 @@ static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
@@ -375,8 +405,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ if (READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
@@ -405,10 +435,6 @@ bool __rcu_is_watching(void);
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
-typedef void call_rcu_func_t(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-void wait_rcu_gp(call_rcu_func_t crf);
-
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
@@ -489,46 +515,10 @@ int rcu_read_lock_bh_held(void);
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
* RCU-sched read-side critical section. In absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise. Note that disabling
- * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section. This is useful for debug checks in functions
- * that required that they be called within an RCU-sched read-side
- * critical section.
- *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes. This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * critical section unless it can prove otherwise.
*/
#ifdef CONFIG_PREEMPT_COUNT
-static inline int rcu_read_lock_sched_held(void)
-{
- int lockdep_opinion = 0;
-
- if (!debug_lockdep_rcu_enabled())
- return 1;
- if (!rcu_is_watching())
- return 0;
- if (!rcu_lockdep_current_cpu_online())
- return 0;
- if (debug_locks)
- lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
- return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
-}
+int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
@@ -568,14 +558,14 @@ static inline int rcu_read_lock_sched_held(void)
#ifdef CONFIG_PROVE_RCU
/**
- * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
*/
-#define rcu_lockdep_assert(c, s) \
+#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(.data.unlikely) __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
+ if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
@@ -584,8 +574,8 @@ static inline int rcu_read_lock_sched_held(void)
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal context switch in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
@@ -596,15 +586,15 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
- "Illegal context switch in RCU-bh read-side critical section"); \
- rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -627,7 +617,7 @@ static inline void rcu_preempt_sleep_check(void)
#define __rcu_access_pointer(p, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
@@ -635,32 +625,17 @@ static inline void rcu_preempt_sleep_check(void)
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
-#define __rcu_access_index(p, space) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- (_________p1); \
-})
-#define __rcu_dereference_index_check(p, c) \
-({ \
- /* Dependency order vs. p above. */ \
- typeof(p) _________p1 = lockless_dereference(p); \
- rcu_lockdep_assert(c, \
- "suspicious rcu_dereference_index_check() usage"); \
- (_________p1); \
-})
-
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
@@ -668,21 +643,6 @@ static inline void rcu_preempt_sleep_check(void)
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
-/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
@@ -720,7 +680,7 @@ static inline void rcu_preempt_sleep_check(void)
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
@@ -805,47 +765,12 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
/**
- * rcu_access_index() - fetch RCU index with no dereferencing
- * @p: The index to read
- *
- * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this index is accessed, but the index is not
- * dereferenced, for example, when testing an RCU-protected index against
- * -1. Although rcu_access_index() may also be used in cases where
- * update-side locks prevent the value of the index from changing, you
- * should instead use rcu_dereference_index_protected() for this use case.
- */
-#define rcu_access_index(p) __rcu_access_index((p), __rcu)
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
-/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
@@ -885,6 +810,28 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
+ * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
+ * @p: The pointer to hand off
+ *
+ * This is simply an identity function, but it documents where a pointer
+ * is handed off from RCU to some other synchronization mechanism, for
+ * example, reference counting or locking. In C11, it would map to
+ * kill_dependency(). It could be used as follows:
+ *
+ * rcu_read_lock();
+ * p = rcu_dereference(gp);
+ * long_lived = is_long_lived(p);
+ * if (long_lived) {
+ * if (!atomic_inc_not_zero(p->refcnt))
+ * long_lived = false;
+ * else
+ * p = rcu_pointer_handoff(p);
+ * }
+ * rcu_read_unlock();
+ */
+#define rcu_pointer_handoff(p) (p)
+
+/**
* rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
@@ -930,8 +877,8 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock() used illegally while idle");
}
/*
@@ -981,8 +928,8 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock() used illegally while idle");
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -1013,8 +960,8 @@ static inline void rcu_read_lock_bh(void)
#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
#endif
}
@@ -1028,8 +975,8 @@ static inline void rcu_read_unlock_bh(void)
#ifdef CONFIG_PREEMPT_RT_FULL
rcu_read_unlock();
#else
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
#endif
@@ -1054,8 +1001,8 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -1072,8 +1019,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
@@ -1124,7 +1071,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_dereference_sparse(p, __rcu); \
- p = RCU_INITIALIZER(v); \
+ WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
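Even with WRITE_ONCE(), RCU_INIT_POINTER() remains limited to cases where ordering against readers is unnecessary, typically before the pointer has been published. A minimal sketch, with all names invented:

	struct foo {
		int a;
	};
	static struct foo __rcu *gp;

	static int __init foo_init(void)
	{
		struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -ENOMEM;
		p->a = 1;
		/* No concurrent readers yet, so no memory barrier needed. */
		RCU_INIT_POINTER(gp, p);
		return 0;
	}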
/**
@@ -1147,7 +1094,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define __kfree_rcu(head, offset) \
do { \
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+ kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
} while (0)
/**
@@ -1179,13 +1126,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
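For reference, kfree_rcu() takes the pointer and the name of its rcu_head field; the offset encoding in __kfree_rcu() above is what lets the "callback" be replaced by a plain kfree() after a grace period. A usage sketch with made-up struct and field names:

	struct foo {
		int a;
		struct rcu_head rh;
	};

	static void foo_release(struct foo *p)
	{
		/* Frees p after a grace period; no callback function needed. */
		kfree_rcu(p, rh);
	}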
-#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+#ifdef CONFIG_TINY_RCU
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
- *delta_jiffies = ULONG_MAX;
+ *nextevt = KTIME_MAX;
return 0;
}
-#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+#endif /* #ifdef CONFIG_TINY_RCU */
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
diff --git a/kernel/include/linux/rcutiny.h b/kernel/include/linux/rcutiny.h
index 937edaeb1..4c1aaf9cc 100644
--- a/kernel/include/linux/rcutiny.h
+++ b/kernel/include/linux/rcutiny.h
@@ -37,6 +37,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
+static inline unsigned long get_state_synchronize_sched(void)
+{
+ return 0;
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+ might_sleep();
+}
+
static inline void rcu_barrier_bh(void)
{
wait_rcu_gp(call_rcu_bh);
@@ -73,7 +83,7 @@ static inline void synchronize_sched_expedited(void)
}
static inline void kfree_call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
+ rcu_callback_t func)
{
call_rcu(head, func);
}
@@ -159,6 +169,22 @@ static inline void rcu_cpu_stall_reset(void)
{
}
+static inline void rcu_idle_enter(void)
+{
+}
+
+static inline void rcu_idle_exit(void)
+{
+}
+
+static inline void rcu_irq_enter(void)
+{
+}
+
+static inline void rcu_irq_exit(void)
+{
+}
+
static inline void exit_rcu(void)
{
}
@@ -190,6 +216,7 @@ static inline bool rcu_is_watching(void)
static inline void rcu_all_qs(void)
{
+ barrier(); /* Avoid RCU read-side critical sections leaking across. */
}
#endif /* __LINUX_RCUTINY_H */
diff --git a/kernel/include/linux/rcutree.h b/kernel/include/linux/rcutree.h
index 0b350893b..436c9e62b 100644
--- a/kernel/include/linux/rcutree.h
+++ b/kernel/include/linux/rcutree.h
@@ -31,9 +31,7 @@
#define __LINUX_RCUTREE_H
void rcu_note_context_switch(void);
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies);
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
/*
@@ -54,7 +52,7 @@ void synchronize_rcu_bh(void);
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
-void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
/**
* synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
@@ -86,6 +84,8 @@ void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
+unsigned long get_state_synchronize_sched(void);
+void cond_synchronize_sched(unsigned long oldstate);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
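The new sched-flavored pair mirrors get_state_synchronize_rcu()/cond_synchronize_rcu(): snapshot the grace-period state, do other work, then block only if a full grace period has not already elapsed. A minimal sketch:

	unsigned long snap;

	snap = get_state_synchronize_sched();
	/* ... potentially long-running setup work ... */
	cond_synchronize_sched(snap);	/* returns immediately if a grace
					 * period already elapsed */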
@@ -99,6 +99,11 @@ void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
+
void exit_rcu(void);
void rcu_scheduler_starting(void);
diff --git a/kernel/include/linux/regmap.h b/kernel/include/linux/regmap.h
index 116655d92..d68bb4021 100644
--- a/kernel/include/linux/regmap.h
+++ b/kernel/include/linux/regmap.h
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/bug.h>
+#include <linux/lockdep.h>
struct module;
struct device;
@@ -50,6 +51,20 @@ struct reg_default {
unsigned int def;
};
+/**
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ * @delay_us: Delay to be applied after the register write in microseconds
+ */
+struct reg_sequence {
+ unsigned int reg;
+ unsigned int def;
+ unsigned int delay_us;
+};
+
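Unlike struct reg_default, a reg_sequence entry is ordered and may carry a settle delay, which is what the regmap_multi_reg_write() prototypes later in this patch now consume. A sketch of a hypothetical power-up sequence:

	static const struct reg_sequence mychip_powerup[] = {
		{ .reg = 0x00, .def = 0x01 },
		{ .reg = 0x01, .def = 0x80, .delay_us = 10 },	/* 10us settle */
		{ .reg = 0x02, .def = 0x3f },
	};

	ret = regmap_multi_reg_write(map, mychip_powerup,
				     ARRAY_SIZE(mychip_powerup));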
#ifdef CONFIG_REGMAP
enum regmap_endian {
@@ -281,6 +296,8 @@ typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
unsigned int *val);
typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
unsigned int val);
+typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
typedef void (*regmap_hw_free_context)(void *context);
@@ -296,8 +313,12 @@ typedef void (*regmap_hw_free_context)(void *context);
* if not implemented on a given device.
* @async_write: Write operation which completes asynchronously, optional and
* must serialise with respect to non-async I/O.
+ * @reg_write: Write a single register value to the given register address. The
+ * write operation must have completed by the time the function returns.
* @read: Read operation. Data is returned in the buffer used to transmit
* data.
+ * @reg_read: Read a single register value from a given register address.
+ * @free_context: Free context.
* @async_alloc: Allocate a regmap_async() structure.
* @read_flag_mask: Mask to be set in the top byte of the register when doing
* a read.
@@ -307,7 +328,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* @val_format_endian_default: Default endianness for formatted register
* values. Used when the regmap_config specifies DEFAULT. If this is
* DEFAULT, BIG is assumed.
- * @async_size: Size of struct used for async work.
+ * @max_raw_read: Max raw read size that can be used on the bus.
+ * @max_raw_write: Max raw write size that can be used on the bus.
*/
struct regmap_bus {
bool fast_io;
@@ -315,6 +337,7 @@ struct regmap_bus {
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
regmap_hw_reg_write reg_write;
+ regmap_hw_reg_update_bits reg_update_bits;
regmap_hw_read read;
regmap_hw_reg_read reg_read;
regmap_hw_free_context free_context;
@@ -322,47 +345,186 @@ struct regmap_bus {
u8 read_flag_mask;
enum regmap_endian reg_format_endian_default;
enum regmap_endian val_format_endian_default;
+ size_t max_raw_read;
+ size_t max_raw_write;
};
-struct regmap *regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config);
+/*
+ * __regmap_init functions.
+ *
+ * These functions take a lock key and name parameter, and should not be called
+ * directly. Instead, use the regmap_init macros that generate a key and name
+ * for each call.
+ */
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+/*
+ * Wrapper for regmap_init macros to include a unique lockdep key and name
+ * for each call. No-op if CONFIG_LOCKDEP is not set.
+ *
+ * @fn: Real function to call (in the form __[*_]regmap_init[_*])
+ * @name: Config variable name (#config in the calling macro)
+ */
+#ifdef CONFIG_LOCKDEP
+#define __regmap_lockdep_wrapper(fn, name, ...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ fn(__VA_ARGS__, &_key, \
+ KBUILD_BASENAME ":" \
+ __stringify(__LINE__) ":" \
+ "(" name ")->lock"); \
+ }) \
+)
+#else
+#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
+#endif
+
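Expanded by hand, each regmap_init_*() call site therefore gets its own static lock class key plus a human-readable lock name; roughly, regmap_init_i2c(i2c, &cfg) becomes (string-literal concatenation left as in the macro):

	({
		static struct lock_class_key _key;
		__regmap_init_i2c(i2c, &cfg, &_key,
				  KBUILD_BASENAME ":" __stringify(__LINE__)
				  ":(&cfg)->lock");
	})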
+/**
+ * regmap_init(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly; it should be called by bus-specific init functions.
+ */
+#define regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__regmap_init, #config, \
+ dev, bus, bus_context, config)
int regmap_attach_dev(struct device *dev, struct regmap *map,
- const struct regmap_config *config);
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config);
-struct regmap *regmap_init_spi(struct spi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config);
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config);
-
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spi(struct spi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config);
+ const struct regmap_config *config);
-bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+/**
+ * regmap_init_i2c(): Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * regmap_init_mmio_clk(): Initialise register map with register clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
/**
* regmap_init_mmio(): Initialise register map
@@ -374,12 +536,109 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-static inline struct regmap *regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
-{
- return regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define regmap_init_mmio(dev, regs, config) \
+ regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
+ ac97, config)
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * devm_regmap_init(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. This function should generally not be called
+ * directly; it should be called by bus-specific init functions. The
+ * map will be automatically freed by the device management code.
+ */
+#define devm_regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
+ dev, bus, bus_context, config)
+
+/**
+ * devm_regmap_init_i2c(): Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
+ i2c, config)
+
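Consumer usage is unchanged by the function-to-macro conversion. A hedged probe sketch (driver names invented; probe signature per the i2c API of this kernel era):

	static const struct regmap_config mychip_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	static int mychip_probe(struct i2c_client *i2c,
				const struct i2c_device_id *id)
	{
		struct regmap *map;

		map = devm_regmap_init_i2c(i2c, &mychip_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);
		/* The regmap is released automatically on driver detach. */
		return 0;
	}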
+/**
+ * devm_regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
/**
* devm_regmap_init_mmio(): Initialise managed register map
@@ -392,12 +651,22 @@ static inline struct regmap *regmap_init_mmio(struct device *dev,
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
-{
- return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define devm_regmap_init_mmio(dev, regs, config) \
+ devm_regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * devm_regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
+ ac97, config)
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
@@ -410,10 +679,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
int num_regs);
int regmap_multi_reg_write_bypassed(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
int num_regs);
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
@@ -424,6 +693,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
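Both helpers perform a masked read-modify-write; as I read this addition, regmap_write_bits() differs from regmap_update_bits() in that it always issues the bus write, even when the new value matches the current one. A minimal sketch:

	/* Set bit 3 of register 0x20, leaving the other bits untouched. */
	ret = regmap_update_bits(map, 0x20, BIT(3), BIT(3));

	/* Same mask/value, but the write always reaches the hardware. */
	ret = regmap_write_bits(map, 0x20, BIT(3), BIT(3));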
@@ -433,8 +704,12 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
int regmap_get_val_bytes(struct regmap *map);
+int regmap_get_max_register(struct regmap *map);
+int regmap_get_reg_stride(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
+size_t regmap_get_raw_read_max(struct regmap *map);
+size_t regmap_get_raw_write_max(struct regmap *map);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -448,7 +723,7 @@ void regcache_mark_dirty(struct regmap *map);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
int num_regs);
int regmap_parse_val(struct regmap *map, const void *buf,
unsigned int *val);
@@ -501,6 +776,8 @@ int regmap_field_update_bits(struct regmap_field *field,
int regmap_fields_write(struct regmap_field *field, unsigned int id,
unsigned int val);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+ unsigned int val);
int regmap_fields_read(struct regmap_field *field, unsigned int id,
unsigned int *val);
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
@@ -517,6 +794,9 @@ struct regmap_irq {
unsigned int mask;
};
+#define REGMAP_IRQ_REG(_irq, _off, _mask) \
+ [_irq] = { .reg_offset = (_off), .mask = (_mask) }
+
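The new helper keeps irq tables readable by indexing each entry by its irq number. A sketch with invented names:

	enum { MYCHIP_IRQ_ALARM = 0, MYCHIP_IRQ_FAULT };

	static const struct regmap_irq mychip_irqs[] = {
		REGMAP_IRQ_REG(MYCHIP_IRQ_ALARM, 0, BIT(0)),
		REGMAP_IRQ_REG(MYCHIP_IRQ_FAULT, 0, BIT(1)),
	};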
/**
* Description of a generic regmap irq_chip. This is not intended to
* handle every possible interrupt controller, but it should handle a
@@ -526,6 +806,8 @@ struct regmap_irq {
*
* @status_base: Base status register address.
* @mask_base: Base mask register address.
+ * @unmask_base: Base unmask register address, for chips that have
+ * separate mask and unmask registers.
* @ack_base: Base ack address. If zero then the chip is clear on read.
* Using zero value is possible with @use_ack bit.
* @wake_base: Base address for wake enables. If zero unsupported.
@@ -533,6 +815,7 @@ struct regmap_irq {
* @init_ack_masked: Ack all masked interrupts once during initialization.
* @mask_invert: Inverted mask register: cleared bits are masked out.
* @use_ack: Use @ack register even if it is zero.
+ * @ack_invert: Inverted ack register: cleared bits for ack.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
@@ -546,12 +829,14 @@ struct regmap_irq_chip {
unsigned int status_base;
unsigned int mask_base;
+ unsigned int unmask_base;
unsigned int ack_base;
unsigned int wake_base;
unsigned int irq_reg_stride;
bool init_ack_masked:1;
bool mask_invert:1;
bool use_ack:1;
+ bool ack_invert:1;
bool wake_invert:1;
bool runtime_pm:1;
@@ -643,6 +928,13 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_update_bits_async(struct regmap *map,
unsigned int reg,
unsigned int mask, unsigned int val)
@@ -676,6 +968,18 @@ static inline int regmap_get_val_bytes(struct regmap *map)
return -EINVAL;
}
+static inline int regmap_get_max_register(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_reg_stride(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/kernel/include/linux/regulator/consumer.h b/kernel/include/linux/regulator/consumer.h
index f8a689ed6..9e0e76992 100644
--- a/kernel/include/linux/regulator/consumer.h
+++ b/kernel/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@ static inline int regulator_count_voltages(struct regulator *regulator)
{
return 0;
}
+
+static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
+{
+ return -EINVAL;
+}
+
#endif
+static inline int regulator_set_voltage_triplet(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
+ return 0;
+
+ return regulator_set_voltage(regulator, min_uV, max_uV);
+}
+
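As the body above shows, the triplet helper first tries the narrow [target, max] range and falls back to the full [min, max] range, which suits OPP-style tables that specify minimum, target, and maximum voltages. For example:

	/* Prefer 950mV, but accept anything between 900mV and 1V. */
	ret = regulator_set_voltage_triplet(vdd, 900000, 950000, 1000000);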
static inline int regulator_set_voltage_tol(struct regulator *regulator,
int new_uV, int tol_uV)
{
diff --git a/kernel/include/linux/regulator/da9211.h b/kernel/include/linux/regulator/da9211.h
index 5dd65acc2..a43a5ca11 100644
--- a/kernel/include/linux/regulator/da9211.h
+++ b/kernel/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
/*
- * da9211.h - Regulator device driver for DA9211/DA9213
- * Copyright (C) 2014 Dialog Semiconductor Ltd.
+ * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
enum da9211_chip_id {
DA9211,
DA9213,
+ DA9215,
};
struct da9211_pdata {
diff --git a/kernel/include/linux/regulator/driver.h b/kernel/include/linux/regulator/driver.h
index fffa688ac..9c2903e58 100644
--- a/kernel/include/linux/regulator/driver.h
+++ b/kernel/include/linux/regulator/driver.h
@@ -91,6 +91,7 @@ struct regulator_linear_range {
* @set_current_limit: Configure a limit for a current-limited regulator.
* The driver should select the current closest to max_uA.
* @get_current_limit: Get the configured limit for a current-limited regulator.
+ * @set_input_current_limit: Configure an input limit.
*
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
@@ -111,6 +112,7 @@ struct regulator_linear_range {
* to stabilise after being set to a new value, in microseconds.
* The function provides the from and to voltage selector, the
* function should return the worst case.
+ * @set_soft_start: Enable soft start for the regulator.
*
* @set_suspend_voltage: Set the voltage for the regulator when the system
* is suspended.
@@ -121,6 +123,9 @@ struct regulator_linear_range {
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
*
+ * @set_pull_down: Configure the regulator to pull down when the regulator
+ * is disabled.
+ *
* This struct describes regulator operations which can be implemented by
* regulator chip drivers.
*/
@@ -142,6 +147,9 @@ struct regulator_ops {
int min_uA, int max_uA);
int (*get_current_limit) (struct regulator_dev *);
+ int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+ int (*set_over_current_protection) (struct regulator_dev *);
+
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
int (*disable) (struct regulator_dev *);
@@ -158,6 +166,8 @@ struct regulator_ops {
unsigned int old_selector,
unsigned int new_selector);
+ int (*set_soft_start) (struct regulator_dev *);
+
/* report regulator status ... most other accessors report
* control inputs, this reports results of combining inputs
* from Linux (and other sources) with the actual load.
@@ -187,6 +197,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+
+ int (*set_pull_down) (struct regulator_dev *);
};
/*
@@ -233,6 +245,7 @@ enum regulator_type {
* @linear_min_sel: Minimal selector for starting linear mapping
* @fixed_uV: Fixed voltage of rails.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
* @n_linear_ranges: Number of entries in the @linear_ranges table.
* @volt_table: Voltage mapping table (if table based mapping)
@@ -280,6 +293,7 @@ struct regulator_desc {
unsigned int linear_min_sel;
int fixed_uV;
unsigned int ramp_delay;
+ int min_dropout_uV;
const struct regulator_linear_range *linear_ranges;
int n_linear_ranges;
diff --git a/kernel/include/linux/regulator/machine.h b/kernel/include/linux/regulator/machine.h
index b07562e08..a1067d0b3 100644
--- a/kernel/include/linux/regulator/machine.h
+++ b/kernel/include/linux/regulator/machine.h
@@ -75,6 +75,8 @@ struct regulator_state {
*
* @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
+ * @ilim_uA: Maximum input current.
+ * @system_load: Load that isn't captured by any consumer requests.
*
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
@@ -86,6 +88,8 @@ struct regulator_state {
* applied.
* @apply_uV: Apply the voltage constraint when initialising.
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
+ * @soft_start: Enable soft start so that voltage ramps slowly.
+ * @pull_down: Enable pull down when regulator is disabled.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
@@ -111,6 +115,9 @@ struct regulation_constraints {
/* current output range (inclusive) - for current control */
int min_uA;
int max_uA;
+ int ilim_uA;
+
+ int system_load;
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
@@ -138,6 +145,9 @@ struct regulation_constraints {
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
unsigned apply_uV:1; /* apply uV constraint if min == max */
unsigned ramp_disable:1; /* disable ramp delay */
+ unsigned soft_start:1; /* ramp voltage slowly */
+ unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned over_current_protection:1; /* auto disable on over current */
};
/**
diff --git a/kernel/include/linux/regulator/max8973-regulator.h b/kernel/include/linux/regulator/max8973-regulator.h
index f8acc052e..f6a8a16a0 100644
--- a/kernel/include/linux/regulator/max8973-regulator.h
+++ b/kernel/include/linux/regulator/max8973-regulator.h
@@ -58,6 +58,9 @@
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
* device register.
+ * @enable_gpio: Enable GPIO. If the EN pin is controlled through a GPIO
+ * from the host, the GPIO number can be provided here. If the
+ * pin is not GPIO-controlled, it should be -1.
* @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
* @dvs_def_state: Default state of dvs. 1 if it is high else 0.
*/
@@ -65,6 +68,7 @@ struct max8973_regulator_platform_data {
struct regulator_init_data *reg_init_data;
unsigned long control_flags;
bool enable_ext_control;
+ int enable_gpio;
int dvs_gpio;
unsigned dvs_def_state:1;
};
diff --git a/kernel/include/linux/regulator/mt6311.h b/kernel/include/linux/regulator/mt6311.h
new file mode 100644
index 000000000..847325939
--- /dev/null
+++ b/kernel/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6311_H
+#define __LINUX_REGULATOR_MT6311_H
+
+#define MT6311_MAX_REGULATORS 2
+
+enum {
+ MT6311_ID_VDVFS = 0,
+ MT6311_ID_VBIASN,
+};
+
+#define MT6311_E1_CID_CODE 0x10
+#define MT6311_E2_CID_CODE 0x20
+#define MT6311_E3_CID_CODE 0x30
+
+#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/kernel/include/linux/remoteproc.h b/kernel/include/linux/remoteproc.h
index 78b8a9b9d..9c4e1384f 100644
--- a/kernel/include/linux/remoteproc.h
+++ b/kernel/include/linux/remoteproc.h
@@ -36,11 +36,11 @@
#define REMOTEPROC_H
#include <linux/types.h>
-#include <linux/klist.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/completion.h>
#include <linux/idr.h>
+#include <linux/of.h>
/**
* struct resource_table - firmware resource table header
@@ -330,11 +330,13 @@ struct rproc;
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
+ * @da_to_va: optional platform hook to perform address translations
*/
struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
+ void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
};
/**
@@ -375,7 +377,7 @@ enum rproc_crash_type {
/**
* struct rproc - represents a physical remote processor device
- * @node: klist node of this rproc object
+ * @node: list node of this rproc object
* @domain: iommu domain
* @name: human readable name of the rproc
* @firmware: name of firmware file to be loaded
@@ -407,7 +409,7 @@ enum rproc_crash_type {
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
- struct klist_node node;
+ struct list_head node;
struct iommu_domain *domain;
const char *name;
const char *firmware;
@@ -481,6 +483,7 @@ struct rproc_vdev {
u32 rsc_offset;
};
+struct rproc *rproc_get_by_phandle(phandle phandle);
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len);
diff --git a/kernel/include/linux/reset.h b/kernel/include/linux/reset.h
index da5602bd7..7f65f9cff 100644
--- a/kernel/include/linux/reset.h
+++ b/kernel/include/linux/reset.h
@@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev)
return -ENOSYS;
}
+static inline struct reset_control *__must_check reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct reset_control *__must_check devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
diff --git a/kernel/include/linux/reset/bcm63xx_pmb.h b/kernel/include/linux/reset/bcm63xx_pmb.h
new file mode 100644
index 000000000..bb4af7b5e
--- /dev/null
+++ b/kernel/include/linux/reset/bcm63xx_pmb.h
@@ -0,0 +1,88 @@
+/*
+ * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __BCM63XX_PMB_H
+#define __BCM63XX_PMB_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+/* PMB Master controller register */
+#define PMB_CTRL 0x00
+#define PMC_PMBM_START (1 << 31)
+#define PMC_PMBM_TIMEOUT (1 << 30)
+#define PMC_PMBM_SLAVE_ERR (1 << 29)
+#define PMC_PMBM_BUSY (1 << 28)
+#define PMC_PMBM_READ (0 << 20)
+#define PMC_PMBM_WRITE (1 << 20)
+#define PMB_WR_DATA 0x04
+#define PMB_TIMEOUT 0x08
+#define PMB_RD_DATA 0x0C
+
+#define PMB_BUS_ID_SHIFT 8
+
+/* Perform the low-level PMB master operation, shared between reads and
+ * writes.
+ */
+static inline int __bpcm_do_op(void __iomem *master, unsigned int addr,
+ u32 off, u32 op)
+{
+ unsigned int timeout = 1000;
+ u32 cmd;
+
+ cmd = (PMC_PMBM_START | op | (addr & 0xff) << 12 | off);
+ writel(cmd, master + PMB_CTRL);
+ do {
+ cmd = readl(master + PMB_CTRL);
+ if (!(cmd & PMC_PMBM_START))
+ return 0;
+
+ if (cmd & PMC_PMBM_SLAVE_ERR)
+ return -EIO;
+
+ if (cmd & PMC_PMBM_TIMEOUT)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ } while (timeout-- > 0);
+
+ return -ETIMEDOUT;
+}
+
+static inline int bpcm_rd(void __iomem *master, unsigned int addr,
+ u32 off, u32 *val)
+{
+ int ret = 0;
+
+ ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_READ);
+ *val = readl(master + PMB_RD_DATA);
+
+ return ret;
+}
+
+static inline int bpcm_wr(void __iomem *master, unsigned int addr,
+ u32 off, u32 val)
+{
+ int ret = 0;
+
+ writel(val, master + PMB_WR_DATA);
+ ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_WRITE);
+
+ return ret;
+}
+
+#endif /* __BCM63XX_PMB_H */
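A typical use of these helpers is a read-modify-write of a BPCM register; a hedged sketch, with the register offset and control bit invented:

	u32 val;
	int ret;

	ret = bpcm_rd(master, addr, MYBLK_CTRL_OFF, &val);
	if (ret)
		return ret;
	val |= MYBLK_CTRL_EN;
	return bpcm_wr(master, addr, MYBLK_CTRL_OFF, val);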
diff --git a/kernel/include/linux/rhashtable.h b/kernel/include/linux/rhashtable.h
index 843ceca9a..e50b31d18 100644
--- a/kernel/include/linux/rhashtable.h
+++ b/kernel/include/linux/rhashtable.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
@@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht);
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+ const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -598,9 +600,11 @@ restart:
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
- err = rhashtable_insert_slow(ht, key, obj, new_tbl);
- if (err == -EAGAIN)
+ tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (!IS_ERR_OR_NULL(tbl))
goto slow_path;
+
+ err = PTR_ERR(tbl);
goto out;
}
@@ -611,7 +615,7 @@ restart:
if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht);
+ err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
if (err)
return err;
diff --git a/kernel/include/linux/ring_buffer.h b/kernel/include/linux/ring_buffer.h
index e2c13cd86..4acc552e9 100644
--- a/kernel/include/linux/ring_buffer.h
+++ b/kernel/include/linux/ring_buffer.h
@@ -154,8 +154,8 @@ ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
}
#endif
-int ring_buffer_empty(struct ring_buffer *buffer);
-int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+bool ring_buffer_empty(struct ring_buffer *buffer);
+bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
diff --git a/kernel/include/linux/rio.h b/kernel/include/linux/rio.h
index 6bda06f21..cde976e86 100644
--- a/kernel/include/linux/rio.h
+++ b/kernel/include/linux/rio.h
@@ -298,7 +298,7 @@ struct rio_id_table {
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
- * @switches: List of switches in this netowrk
+ * @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
diff --git a/kernel/include/linux/rmap.h b/kernel/include/linux/rmap.h
index c89c53a11..ddda2ac34 100644
--- a/kernel/include/linux/rmap.h
+++ b/kernel/include/linux/rmap.h
@@ -89,6 +89,9 @@ enum ttu_flags {
TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
+ * and caller guarantees they will
+ * do a final flush if necessary */
};
#ifdef CONFIG_MMU
@@ -105,20 +108,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- up_write(&anon_vma->root->rwsem);
-}
-
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);
diff --git a/kernel/include/linux/rotary_encoder.h b/kernel/include/linux/rotary_encoder.h
index 3f594dce5..fe3dc64e5 100644
--- a/kernel/include/linux/rotary_encoder.h
+++ b/kernel/include/linux/rotary_encoder.h
@@ -8,9 +8,10 @@ struct rotary_encoder_platform_data {
unsigned int gpio_b;
unsigned int inverted_a;
unsigned int inverted_b;
+ unsigned int steps_per_period;
bool relative_axis;
bool rollover;
- bool half_period;
+ bool wakeup_source;
};
#endif /* __ROTARY_ENCODER_H__ */
diff --git a/kernel/include/linux/rtc.h b/kernel/include/linux/rtc.h
index 8dcf6825f..3359f0422 100644
--- a/kernel/include/linux/rtc.h
+++ b/kernel/include/linux/rtc.h
@@ -24,6 +24,14 @@ extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
ktime_t rtc_tm_to_ktime(struct rtc_time tm);
struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+/*
+ * rtc_tm_sub - Return the difference between two rtc_time values, in seconds.
+ */
+static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
+{
+ return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
+}
+
/**
* Deprecated. Use rtc_time64_to_tm().
*/
@@ -101,8 +109,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
-struct rtc_device
-{
+struct rtc_device {
struct device dev;
struct module *owner;
@@ -161,7 +168,6 @@ extern void devm_rtc_device_unregister(struct device *dev,
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
extern int rtc_set_ntp_time(struct timespec64 now);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
@@ -198,10 +204,10 @@ int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
-int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
- ktime_t expires, ktime_t period);
-int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
+ ktime_t expires, ktime_t period);
+void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
void rtc_timer_do_work(struct work_struct *work);
static inline bool is_leap_year(unsigned int year)
diff --git a/kernel/include/linux/rtc/sirfsoc_rtciobrg.h b/kernel/include/linux/rtc/sirfsoc_rtciobrg.h
index 2c92e1c8e..aefd99726 100644
--- a/kernel/include/linux/rtc/sirfsoc_rtciobrg.h
+++ b/kernel/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -9,10 +9,14 @@
#ifndef _SIRFSOC_RTC_IOBRG_H_
#define _SIRFSOC_RTC_IOBRG_H_
+struct regmap_config;
+
extern void sirfsoc_rtc_iobrg_besyncing(void);
extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+ const struct regmap_config *config);
#endif
diff --git a/kernel/include/linux/rtmutex.h b/kernel/include/linux/rtmutex.h
index d5a04ea47..30211c627 100644
--- a/kernel/include/linux/rtmutex.h
+++ b/kernel/include/linux/rtmutex.h
@@ -13,8 +13,8 @@
#define __LINUX_RT_MUTEX_H
#include <linux/linkage.h>
-#include <linux/rbtree.h>
#include <linux/spinlock_types_raw.h>
+#include <linux/rbtree.h>
extern int max_lock_depth; /* for sysctl */
diff --git a/kernel/include/linux/rtnetlink.h b/kernel/include/linux/rtnetlink.h
index 7b8e260c4..4be5048b1 100644
--- a/kernel/include/linux/rtnetlink.h
+++ b/kernel/include/linux/rtnetlink.h
@@ -33,11 +33,11 @@ extern wait_queue_head_t netdev_unregistering_wq;
extern struct mutex net_mutex;
#ifdef CONFIG_PROVE_LOCKING
-extern int lockdep_rtnl_is_held(void);
+extern bool lockdep_rtnl_is_held(void);
#else
-static inline int lockdep_rtnl_is_held(void)
+static inline bool lockdep_rtnl_is_held(void)
{
- return 1;
+ return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
@@ -79,17 +79,9 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
-#else
-static inline void net_inc_ingress_queue(void)
-{
-}
-
-static inline void net_dec_ingress_queue(void)
-{
-}
#endif
extern void rtnetlink_init(void);
@@ -122,5 +114,9 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask, int nlflags);
+ u32 flags, u32 mask, int nlflags,
+ u32 filter_mask,
+ int (*vlan_fill)(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 filter_mask));
#endif /* __LINUX_RTNETLINK_H */
diff --git a/kernel/include/linux/rwsem_rt.h b/kernel/include/linux/rwsem_rt.h
index 928a05cbf..f97860b2e 100644
--- a/kernel/include/linux/rwsem_rt.h
+++ b/kernel/include/linux/rwsem_rt.h
@@ -51,18 +51,20 @@ do { \
__rt_init_rwsem((sem), #sem, &__key); \
} while (0)
-extern void rt_down_write(struct rw_semaphore *rwsem);
+extern void rt_down_write(struct rw_semaphore *rwsem);
extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
- struct lockdep_map *nest);
-extern void rt_down_read(struct rw_semaphore *rwsem);
+ struct lockdep_map *nest);
+extern void rt__down_read(struct rw_semaphore *rwsem);
+extern void rt_down_read(struct rw_semaphore *rwsem);
extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
+extern int rt__down_read_trylock(struct rw_semaphore *rwsem);
extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
-extern void __rt_up_read(struct rw_semaphore *rwsem);
-extern void rt_up_read(struct rw_semaphore *rwsem);
-extern void rt_up_write(struct rw_semaphore *rwsem);
-extern void rt_downgrade_write(struct rw_semaphore *rwsem);
+extern void __rt_up_read(struct rw_semaphore *rwsem);
+extern void rt_up_read(struct rw_semaphore *rwsem);
+extern void rt_up_write(struct rw_semaphore *rwsem);
+extern void rt_downgrade_write(struct rw_semaphore *rwsem);
#define init_rwsem(sem) rt_init_rwsem(sem)
#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
@@ -73,11 +75,21 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !RB_EMPTY_ROOT(&sem->lock.waiters);
}
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ rt__down_read(sem);
+}
+
static inline void down_read(struct rw_semaphore *sem)
{
rt_down_read(sem);
}
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ return rt__down_read_trylock(sem);
+}
+
static inline int down_read_trylock(struct rw_semaphore *sem)
{
return rt_down_read_trylock(sem);
diff --git a/kernel/include/linux/scatterlist.h b/kernel/include/linux/scatterlist.h
index ed8f9e70d..556ec1ea2 100644
--- a/kernel/include/linux/scatterlist.h
+++ b/kernel/include/linux/scatterlist.h
@@ -2,13 +2,39 @@
#define _LINUX_SCATTERLIST_H
#include <linux/string.h>
+#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
-
-#include <asm/types.h>
-#include <asm/scatterlist.h>
#include <asm/io.h>
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+ dma_addr_t dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ unsigned int dma_length;
+#endif
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries dma_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+#define sg_dma_len(sg) ((sg)->dma_length)
+#else
+#define sg_dma_len(sg) ((sg)->length)
+#endif
+
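Per the comment above, a driver walks only the entries dma_map_sg() returned; a minimal sketch:

	struct scatterlist *sg;
	int i, nents;

	nents = dma_map_sg(dev, sgl, orig_nents, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program addr/len into the device's DMA engine */
	}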
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
@@ -18,10 +44,9 @@ struct sg_table {
/*
* Notes on SG table design.
*
- * Architectures must provide an unsigned long page_link field in the
- * scatterlist struct. We use that to place the page pointer AND encode
- * information about the sg table as well. The two lower bits are reserved
- * for this information.
+ * We use the unsigned long page_link field in the scatterlist struct to place
+ * the page pointer AND encode information about the sg table as well. The two
+ * lower bits are reserved for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
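As an aside, the two reserved bits decode roughly as sketched below; this mirrors the kernel's own sg_is_chain()/sg_is_last() helpers but is shown purely as an illustration (the my_ prefix marks it as such):

	/* Illustrative decode of the page_link encoding described above. */
	static inline bool my_sg_is_chain(struct scatterlist *sg)
	{
		return sg->page_link & 0x01;	/* bit 0: points to the next sg table */
	}

	static inline bool my_sg_is_last(struct scatterlist *sg)
	{
		return sg->page_link & 0x02;	/* bit 1: terminates the list */
	}

	static inline struct page *my_sg_page(struct scatterlist *sg)
	{
		return (struct page *)(sg->page_link & ~0x03UL);	/* strip flag bits */
	}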
@@ -136,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
- BUG();
-#endif
-
/*
* offset and length are unused for chain entry. Clear them.
*/
@@ -221,10 +242,16 @@ static inline void *sg_virt(struct scatterlist *sg)
}
int sg_nents(struct scatterlist *sg);
+int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+ const off_t skip, const int nb_splits,
+ const size_t *split_sizes,
+ struct scatterlist **out, int *out_mapped_nents,
+ gfp_t gfp_mask);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
@@ -239,13 +266,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+ size_t buflen, off_t skip, bool to_buffer);
+
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
+ const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
+ const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
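A short usage sketch for the copy helpers above (buffer contents hypothetical); note that the const-correct prototypes now allow passing read-only source buffers:

	/* Round-trip a small buffer through an initialized sg list. */
	static void sg_copy_demo(struct scatterlist *sgl, unsigned int nents)
	{
		static const char cmd[32] = "example command";
		char reply[32];

		sg_copy_from_buffer(sgl, nents, cmd, sizeof(cmd));	/* buf -> sg */
		sg_copy_to_buffer(sgl, nents, reply, sizeof(reply));	/* sg -> buf */
	}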
diff --git a/kernel/include/linux/sched.h b/kernel/include/linux/sched.h
index 956658437..58c5ec8c3 100644
--- a/kernel/include/linux/sched.h
+++ b/kernel/include/linux/sched.h
@@ -25,7 +25,7 @@ struct sched_param {
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
#include <asm/kmap_types.h>
#include <asm/page.h>
@@ -59,6 +59,7 @@ struct sched_param {
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
+#include <linux/cgroup-defs.h>
#include <asm/processor.h>
@@ -133,6 +134,7 @@ struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
+struct nameidata;
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -174,9 +176,12 @@ extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
-extern void update_cpu_load_nohz(void);
-extern unsigned long get_parent_ip(unsigned long addr);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void update_cpu_load_nohz(void);
+#else
+static inline void update_cpu_load_nohz(void) { }
+#endif
extern void dump_cpu_task(int cpu);
@@ -186,8 +191,6 @@ struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
-extern void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif
/*
@@ -214,9 +217,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
-#define TASK_STATE_MAX 1024
+#define TASK_NOLOAD 1024
+#define TASK_STATE_MAX 2048
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -226,6 +230,8 @@ extern char ___assert_task_state[1 - 2*!!(
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
+
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
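TASK_IDLE exists so kernel threads can sleep uninterruptibly without inflating the load average. A minimal kthread sketch (the thread body is hypothetical):

	/* Idle-wait: D state, but excluded from loadavg via TASK_NOLOAD. */
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int idle_worker(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_IDLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
		return 0;
	}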
@@ -238,7 +244,8 @@ extern char ___assert_task_state[1 - 2*!!(
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0)
+ (task->flags & PF_FROZEN) == 0 && \
+ (task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -250,7 +257,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- set_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
/*
@@ -272,7 +279,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- set_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
@@ -280,7 +287,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
- set_mb((tsk)->state, (state_value))
+ smp_store_mb((tsk)->state, (state_value))
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -296,7 +303,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
- set_mb(current->state, (state_value))
+ smp_store_mb(current->state, (state_value))
#endif
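The rename is not cosmetic: set_current_state() must order the state store before the subsequent condition re-check, which smp_store_mb() (a store followed by a full memory barrier) guarantees. The canonical wait loop that depends on this ordering, sketched with a hypothetical done_flag:

	/* Without the barrier, the condition load could be reordered before the
	 * state store, and a concurrent wake_up() could be missed entirely. */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* store + full barrier */
		if (done_flag)				/* re-check after the barrier */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);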
@@ -338,14 +345,10 @@ extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(int pinned);
+extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
-{
- return smp_processor_id();
-}
#endif
/*
@@ -382,6 +385,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
+extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
@@ -481,9 +485,11 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8
+#define MMF_DUMP_DAX_PRIVATE 9
+#define MMF_DUMP_DAX_SHARED 10
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS 7
+#define MMF_DUMP_FILTER_BITS 9
#define MMF_DUMP_FILTER_MASK \
(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
@@ -528,39 +534,49 @@ struct cpu_itimer {
};
/**
- * struct cputime - snapshot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime

* @utime: time spent in user mode
* @stime: time spent in system mode
+ * @lock: protects the above two fields
*
- * Gathers a generic snapshot of user and system time.
+ * Stores previous user/system time values such that we can guarantee
+ * monotonicity.
*/
-struct cputime {
+struct prev_cputime {
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
cputime_t utime;
cputime_t stime;
+ raw_spinlock_t lock;
+#endif
};
+static inline void prev_cputime_init(struct prev_cputime *prev)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ prev->utime = prev->stime = 0;
+ raw_spin_lock_init(&prev->lock);
+#endif
+}
+
/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
- * This is an extension of struct cputime that includes the total runtime
- * spent by the task from the scheduler point of view.
- *
- * As a result, this structure groups together three kinds of CPU time
- * that are tracked for threads and thread groups. Most things considering
- * CPU time want to group these counts together and treat all three
- * of them in parallel.
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups. Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
*/
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
+
/* Alternate field names when used to cache expirations. */
-#define prof_exp stime
#define virt_exp utime
+#define prof_exp stime
#define sched_exp sum_exec_runtime
#define INIT_CPUTIME \
@@ -570,35 +586,59 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
-#else
-#define PREEMPT_DISABLED PREEMPT_ENABLED
-#endif
+/*
+ * This is the atomic variant of task_cputime, which can be used for
+ * storing and updating task_cputime statistics without locking.
+ */
+struct task_cputime_atomic {
+ atomic64_t utime;
+ atomic64_t stime;
+ atomic64_t sum_exec_runtime;
+};
+
+#define INIT_CPUTIME_ATOMIC \
+ (struct task_cputime_atomic) { \
+ .utime = ATOMIC64_INIT(0), \
+ .stime = ATOMIC64_INIT(0), \
+ .sum_exec_runtime = ATOMIC64_INIT(0), \
+ }
+
+#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
+/*
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
+ *
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
+ */
+#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
/*
- * Disable preemption until the scheduler is running.
- * Reset by start_kernel()->sched_init()->init_idle().
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
+ *
+ * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
*
- * We include PREEMPT_ACTIVE to avoid cond_resched() from working
- * before the scheduler is active -- see should_resched().
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
*/
-#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
+#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/**
* struct thread_group_cputimer - thread group interval timer counts
- * @cputime: thread group interval timers.
- * @running: non-zero when there are timers running and
- * @cputime receives updates.
- * @lock: lock for fields in this struct.
+ * @cputime_atomic: atomic thread group interval timers.
+ * @running: true when there are timers running and
+ * @cputime_atomic receives updates.
+ * @checking_timer: true when a thread in the group is in the
+ * process of checking for thread group timers.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
- struct task_cputime cputime;
- int running;
- raw_spinlock_t lock;
+ struct task_cputime_atomic cputime_atomic;
+ bool running;
+ bool checking_timer;
};
#include <linux/rwsem.h>
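A hedged sketch of what the atomic variant buys: writers accumulate per field with atomic64 operations instead of taking the (now removed) spinlock. The function name and delta source below are illustrative:

	/* Lock-free accounting into a thread group's cputime sample. */
	static void account_utime_sketch(struct task_cputime_atomic *ca,
					 cputime_t delta)
	{
		/* Fields update independently; readers tolerate momentary skew. */
		atomic64_add(delta, &ca->utime);
	}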
@@ -698,9 +738,7 @@ struct signal_struct {
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- struct cputime prev_cputime;
-#endif
+ struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
@@ -737,18 +775,6 @@ struct signal_struct {
unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
-#ifdef CONFIG_CGROUPS
- /*
- * group_rwsem prevents new tasks from entering the threadgroup and
- * member tasks from exiting, more specifically, setting of
- * PF_EXITING. fork and exit paths are protected with this rwsem
- * using threadgroup_change_begin/end(). Users which require
- * threadgroup to remain stable should use threadgroup_[un]lock()
- * which also takes care of exec path. Currently, cgroup is the
- * only user.
- */
- struct rw_semaphore group_rwsem;
-#endif
oom_flags_t oom_flags;
short oom_score_adj; /* OOM kill score adjustment */
@@ -805,6 +831,7 @@ struct user_struct {
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
@@ -815,7 +842,7 @@ struct user_struct {
struct hlist_node uidhash_node;
kuid_t uid;
-#ifdef CONFIG_PERF_EVENTS
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
atomic_long_t locked_vm;
#endif
};
@@ -831,7 +858,7 @@ extern struct user_struct root_user;
struct backing_dev_info;
struct reclaim_state;
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
@@ -841,7 +868,7 @@ struct sched_info {
unsigned long long last_arrival,/* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
};
-#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+#endif /* CONFIG_SCHED_INFO */
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
@@ -943,8 +970,18 @@ struct wake_q_head {
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
-extern void wake_up_q(struct wake_q_head *head);
+ struct task_struct *task);
+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
+
+static inline void wake_up_q(struct wake_q_head *head)
+{
+ __wake_up_q(head, false);
+}
+
+static inline void wake_up_q_sleeper(struct wake_q_head *head)
+{
+ __wake_up_q(head, true);
+}
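Typical usage of the wake_q API, sketched under the assumption of the WAKE_Q() initializer that pairs with the wake_q_head declaration above (the lock and waiter-picking helper are hypothetical):

	/* Collect wakeups under the lock, issue them after dropping it. */
	WAKE_Q(wq);
	struct task_struct *t;

	raw_spin_lock(&lock);
	while ((t = pick_next_waiter()) != NULL)	/* hypothetical helper */
		wake_q_add(&wq, t);
	raw_spin_unlock(&lock);

	wake_up_q(&wq);		/* or wake_up_q_sleeper() for RT sleeping locks */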
/*
* sched-domains (multiprocessor balancing) declarations:
@@ -1114,8 +1151,6 @@ struct sched_domain_topology_level {
#endif
};
-extern struct sched_domain_topology_level *sched_domain_topology;
-
extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);
@@ -1162,29 +1197,24 @@ struct load_weight {
u32 inv_weight;
};
+/*
+ * The load_avg/util_avg accumulates an infinite geometric series.
+ * 1) load_avg factors frequency scaling into the amount of time that a
+ * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
+ * aggregated such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency and cpu scaling into the amount of time
+ * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
+ * For cfs_rq, it is the aggregated such times of all runnable and
+ * blocked sched_entities.
+ * The 64 bit load_sum can:
+ * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
+ * the highest weight (=88761) always runnable, we should not overflow
+ * 2) for entity, support any load.weight always runnable
+ */
struct sched_avg {
- u64 last_runnable_update;
- s64 decay_count;
- /*
- * utilization_avg_contrib describes the amount of time that a
- * sched_entity is running on a CPU. It is based on running_avg_sum
- * and is scaled in the range [0..SCHED_LOAD_SCALE].
- * load_avg_contrib described the amount of time that a sched_entity
- * is runnable on a rq. It is based on both runnable_avg_sum and the
- * weight of the task.
- */
- unsigned long load_avg_contrib, utilization_avg_contrib;
- /*
- * These sums represent an infinite geometric series and so are bound
- * above by 1024/(1-y). Thus we only need a u32 to store them for all
- * choices of y < 1-2^(-32)*1024.
- * running_avg_sum reflects the time that the sched_entity is
- * effectively running on the CPU.
- * runnable_avg_sum represents the amount of time a sched_entity is on
- * a runqueue which includes the running time that is monitored by
- * running_avg_sum.
- */
- u32 runnable_avg_sum, avg_period, running_avg_sum;
+ u64 last_update_time, load_sum;
+ u32 util_sum, period_contrib;
+ unsigned long load_avg, util_avg;
};
#ifdef CONFIG_SCHEDSTATS
@@ -1250,7 +1280,7 @@ struct sched_entity {
#endif
#ifdef CONFIG_SMP
- /* Per-entity load-tracking */
+ /* Per entity load average tracking */
struct sched_avg avg;
#endif
};
@@ -1322,10 +1352,12 @@ struct sched_dl_entity {
union rcu_special {
struct {
- bool blocked;
- bool need_qs;
- } b;
- short s;
+ u8 blocked;
+ u8 need_qs;
+ u8 exp_need_qs;
+ u8 pad; /* Otherwise the compiler can store garbage here. */
+ } b; /* Bits. */
+ u32 s; /* Set of bits. */
};
struct rcu_node;
@@ -1336,6 +1368,25 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+ */
+ struct cpumask cpumask;
+
+ /* True if any bit in cpumask is set */
+ bool flush_required;
+
+ /*
+ * If true then the PTE was dirty when unmapped. The entry must be
+ * flushed before IO is initiated or a stale TLB entry potentially
+ * allows an update without redirtying the page.
+ */
+ bool writable;
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
volatile long saved_state; /* saved state for "spinlock sleepers" */
@@ -1347,9 +1398,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
- struct task_struct *last_wakee;
- unsigned long wakee_flips;
+ unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
+ struct task_struct *last_wakee;
int wake_cpu;
#endif
@@ -1388,8 +1439,6 @@ struct task_struct {
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
@@ -1399,7 +1448,7 @@ struct task_struct {
int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
struct sched_info sched_info;
#endif
@@ -1410,9 +1459,6 @@ struct task_struct {
#endif
struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
- unsigned brk_randomized:1;
-#endif
/* per-thread vma caching */
u32 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1423,22 +1469,29 @@ struct task_struct {
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
- unsigned int jobctl; /* JOBCTL_*, siglock protected */
+ unsigned long jobctl; /* JOBCTL_*, siglock protected */
/* Used for emulating ABI behavior of previous Linux versions */
unsigned int personality;
- unsigned in_execve:1; /* Tell the LSMs that the process is doing an
- * execve */
- unsigned in_iowait:1;
-
- /* Revert to default priority/policy when forking */
+ /* scheduler bits, serialized by scheduler locks */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned sched_migrated:1;
+ unsigned :0; /* force alignment to the next boundary */
+ /* unserialized, strictly 'current' */
+ unsigned in_execve:1; /* bit to tell LSMs we're in execve */
+ unsigned in_iowait:1;
+#ifdef CONFIG_MEMCG
+ unsigned memcg_may_oom:1;
+#endif
#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
+#ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
@@ -1484,9 +1537,7 @@ struct task_struct {
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- struct cputime prev_cputime;
-#endif
+ struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
raw_spinlock_t vtime_lock;
seqcount_t vtime_seq;
@@ -1519,7 +1570,7 @@ struct task_struct {
it with task_lock())
- initialized normally by setup_new_exec */
/* file system info */
- int link_count, total_link_count;
+ struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
@@ -1529,8 +1580,6 @@ struct task_struct {
/* hung task detection */
unsigned long last_switch_count;
#endif
-/* CPU-specific state of this task */
- struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
@@ -1552,9 +1601,7 @@ struct task_struct {
unsigned long sas_ss_sp;
size_t sas_ss_size;
- int (*notifier)(void *priv);
- void *notifier_data;
- sigset_t *notifier_mask;
+
struct callback_head *task_works;
struct audit_context *audit_context;
@@ -1713,6 +1760,10 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ struct tlbflush_unmap_batch tlb_ubc;
+#endif
+
struct rcu_head rcu;
/*
@@ -1778,12 +1829,12 @@ struct task_struct {
#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG
- struct memcg_oom_info {
- struct mem_cgroup *memcg;
- gfp_t gfp_mask;
- int order;
- unsigned int may_oom:1;
- } memcg_oom;
+ struct mem_cgroup *memcg_in_oom;
+ gfp_t memcg_oom_gfp_mask;
+ int memcg_oom_order;
+
+ /* number of pages to reclaim on returning to userland */
+ unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
@@ -1806,9 +1857,26 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
+#endif
int pagefault_disabled;
+/* CPU-specific state of this task */
+ struct thread_struct thread;
+/*
+ * WARNING: on x86, 'thread_struct' contains a variable-sized
+ * structure. It *MUST* be at the end of 'task_struct'.
+ *
+ * Do not put anything below here!
+ */
};
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+extern int arch_task_struct_size __read_mostly;
+#else
+# define arch_task_struct_size (sizeof(struct task_struct))
+#endif
+
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
@@ -1980,7 +2048,8 @@ static inline int pid_alive(const struct task_struct *p)
}
/**
- * is_global_init - check if a task structure is init
+ * is_global_init - check if a task structure is init. Since init
+ * is free to have sub-threads, we need to check tgid.
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
@@ -1989,7 +2058,7 @@ static inline int pid_alive(const struct task_struct *p)
*/
static inline int is_global_init(struct task_struct *tsk)
{
- return tsk->pid == 1;
+ return task_tgid_nr(tsk) == 1;
}
extern struct pid *cad_pid;
@@ -2168,22 +2237,22 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
-#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
-#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
-#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
-#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
-#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
-#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
-#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
+#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
extern bool task_set_jobctl_pending(struct task_struct *task,
- unsigned int mask);
+ unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
- unsigned int mask);
+ unsigned long mask);
static inline void rcu_copy_process(struct task_struct *p)
{
@@ -2246,13 +2315,6 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-#ifndef CONFIG_CPUMASK_OFFSTACK
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
- return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif
-
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2464,26 +2526,33 @@ extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
-extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
-static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+static inline int kernel_dequeue_signal(siginfo_t *info)
{
- unsigned long flags;
+ struct task_struct *tsk = current;
+ siginfo_t __info;
int ret;
- spin_lock_irqsave(&tsk->sighand->siglock, flags);
- ret = dequeue_signal(tsk, mask, info);
- spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ spin_lock_irq(&tsk->sighand->siglock);
+ ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
+ spin_unlock_irq(&tsk->sighand->siglock);
return ret;
}
-extern void block_all_signals(int (*notifier)(void *priv), void *priv,
- sigset_t *mask);
-extern void unblock_all_signals(void);
+static inline void kernel_signal_stop(void)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+ __set_current_state(TASK_STOPPED);
+ spin_unlock_irq(&current->sighand->siglock);
+
+ schedule();
+}
+
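The intended caller is a kernel thread servicing its own signals; a sketch modeled on such users (the surrounding loop is hypothetical):

	/* Drain one pending signal; park in TASK_STOPPED on SIGSTOP. */
	if (signal_pending(current)) {
		if (kernel_dequeue_signal(NULL) == SIGSTOP)
			kernel_signal_stop();	/* sleeps until SIGCONT wakes us */
	}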
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
@@ -2600,8 +2669,22 @@ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
+#ifdef CONFIG_HAVE_COPY_THREAD_TLS
+extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
+ struct task_struct *, unsigned long);
+#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
struct task_struct *);
+
+/* Architectures that haven't opted into copy_thread_tls get the tls argument
+ * via pt_regs, so ignore the tls argument passed via C. */
+static inline int copy_thread_tls(
+ unsigned long clone_flags, unsigned long sp, unsigned long arg,
+ struct task_struct *p, unsigned long tls)
+{
+ return copy_thread(clone_flags, sp, arg, p);
+}
+#endif
extern void flush_thread(void);
extern void exit_thread(void);
@@ -2620,6 +2703,7 @@ extern int do_execveat(int, struct filename *,
const char __user * const __user *,
const char __user * const __user *,
int);
+extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -2643,6 +2727,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
}
#endif
+#define tasklist_empty() \
+ list_empty(&init_task.tasks)
+
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
@@ -2751,53 +2838,33 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
-#ifdef CONFIG_CGROUPS
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
- down_read(&tsk->signal->group_rwsem);
-}
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
- up_read(&tsk->signal->group_rwsem);
-}
-
/**
- * threadgroup_lock - lock threadgroup
- * @tsk: member task of the threadgroup to lock
+ * threadgroup_change_begin - mark the beginning of changes to a threadgroup
+ * @tsk: task causing the changes
*
- * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
- * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid. This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
- *
- * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
- * synchronization. While held, no new task will be added to threadgroup
- * and no existing live task will have its PF_EXITING set.
- *
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
+ * All operations which modify a threadgroup - a new thread joining the
+ * group, death of a member thread (the assertion of PF_EXITING) and
+ * exec(2) dethreading the process and replacing the leader - are wrapped
+ * by threadgroup_change_{begin|end}(). This is to provide a place which
+ * subsystems needing threadgroup stability can hook into for
+ * synchronization.
*/
-static inline void threadgroup_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
{
- down_write(&tsk->signal->group_rwsem);
+ might_sleep();
+ cgroup_threadgroup_change_begin(tsk);
}
/**
- * threadgroup_unlock - unlock threadgroup
- * @tsk: member task of the threadgroup to unlock
+ * threadgroup_change_end - mark the end of changes to a threadgroup
+ * @tsk: task causing the changes
*
- * Reverse threadgroup_lock().
+ * See threadgroup_change_begin().
*/
-static inline void threadgroup_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
{
- up_write(&tsk->signal->group_rwsem);
+ cgroup_threadgroup_change_end(tsk);
}
-#else
-static inline void threadgroup_change_begin(struct task_struct *tsk) {}
-static inline void threadgroup_change_end(struct task_struct *tsk) {}
-static inline void threadgroup_lock(struct task_struct *tsk) {}
-static inline void threadgroup_unlock(struct task_struct *tsk) {}
-#endif
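A sketch of the bracketing the comment describes, as the fork path would use it (surrounding code elided; illustrative, not a verbatim quote of copy_process()):

	threadgroup_change_begin(current);	/* may sleep; enters the cgroup rwsem */
	/* ... link the new thread into the group, publish group_leader ... */
	threadgroup_change_end(current);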
#ifndef __HAVE_THREAD_FUNCTIONS
@@ -3027,12 +3094,6 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif
-
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
@@ -3159,11 +3220,6 @@ static __always_inline bool need_resched(void)
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
-static inline void thread_group_cputime_init(struct signal_struct *sig)
-{
- raw_spin_lock_init(&sig->cputimer.lock);
-}
-
/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
@@ -3226,14 +3282,19 @@ static inline int __migrate_disabled(struct task_struct *p)
/* Future-safe accessor for struct task_struct's cpus_allowed. */
static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (p->migrate_disable)
+ if (__migrate_disabled(p))
return cpumask_of(task_cpu(p));
-#endif
return &p->cpus_allowed;
}
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+ if (__migrate_disabled(p))
+ return 1;
+ return p->nr_cpus_allowed;
+}
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
@@ -3297,13 +3358,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
static inline unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+ return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+ return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
diff --git a/kernel/include/linux/sched/deadline.h b/kernel/include/linux/sched/deadline.h
index 9d303b884..9089a2ae9 100644
--- a/kernel/include/linux/sched/deadline.h
+++ b/kernel/include/linux/sched/deadline.h
@@ -21,4 +21,9 @@ static inline int dl_task(struct task_struct *p)
return dl_prio(p->prio);
}
+static inline bool dl_time_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
#endif /* _SCHED_DEADLINE_H */
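The signed-difference comparison makes dl_time_before() robust against u64 clock wraparound; a worked illustration (values chosen to straddle the wrap point):

	u64 a = (u64)-10;	/* 10 ns before the counter wraps */
	u64 b = 5;		/* 5 ns after the wrap */

	/* Numerically a > b, but in wrapped time a is earlier:
	 * (s64)(a - b) == -15 < 0, so this evaluates to true. */
	bool earlier = dl_time_before(a, b);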
diff --git a/kernel/include/linux/sched/sysctl.h b/kernel/include/linux/sched/sysctl.h
index 596a0e007..c9e4731cf 100644
--- a/kernel/include/linux/sched/sysctl.h
+++ b/kernel/include/linux/sched/sysctl.h
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return 1;
-}
-#endif
/*
* control realtime throttling:
diff --git a/kernel/include/linux/scif.h b/kernel/include/linux/scif.h
new file mode 100644
index 000000000..49a35d6ed
--- /dev/null
+++ b/kernel/include/linux/scif.h
@@ -0,0 +1,1339 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef __SCIF_H__
+#define __SCIF_H__
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/scif_ioctl.h>
+
+#define SCIF_ACCEPT_SYNC 1
+#define SCIF_SEND_BLOCK 1
+#define SCIF_RECV_BLOCK 1
+
+enum {
+ SCIF_PROT_READ = (1 << 0),
+ SCIF_PROT_WRITE = (1 << 1)
+};
+
+enum {
+ SCIF_MAP_FIXED = 0x10,
+ SCIF_MAP_KERNEL = 0x20,
+};
+
+enum {
+ SCIF_FENCE_INIT_SELF = (1 << 0),
+ SCIF_FENCE_INIT_PEER = (1 << 1),
+ SCIF_SIGNAL_LOCAL = (1 << 4),
+ SCIF_SIGNAL_REMOTE = (1 << 5)
+};
+
+enum {
+ SCIF_RMA_USECPU = (1 << 0),
+ SCIF_RMA_USECACHE = (1 << 1),
+ SCIF_RMA_SYNC = (1 << 2),
+ SCIF_RMA_ORDERED = (1 << 3)
+};
+
+/* End of SCIF Admin Reserved Ports */
+#define SCIF_ADMIN_PORT_END 1024
+
+/* End of SCIF Reserved Ports */
+#define SCIF_PORT_RSVD 1088
+
+typedef struct scif_endpt *scif_epd_t;
+typedef struct scif_pinned_pages *scif_pinned_pages_t;
+
+/**
+ * struct scif_range - SCIF registered range used in kernel mode
+ * @cookie: cookie used internally by SCIF
+ * @nr_pages: number of pages of PAGE_SIZE
+ * @prot_flags: R/W protection
+ * @phys_addr: Array of bus addresses
+ * @va: Array of kernel virtual addresses backed by the pages in the phys_addr
+ * array. The va is populated only when called on the host for a remote
+ * SCIF connection on MIC. This is required to support the use case of DMA
+ * between MIC and another device which is not a SCIF node e.g., an IB or
+ * ethernet NIC.
+ */
+struct scif_range {
+ void *cookie;
+ int nr_pages;
+ int prot_flags;
+ dma_addr_t *phys_addr;
+ void __iomem **va;
+};
+
+/**
+ * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll
+ * @epd: SCIF endpoint
+ * @events: requested events
+ * @revents: returned events
+ */
+struct scif_pollepd {
+ scif_epd_t epd;
+ short events;
+ short revents;
+};
+
+/**
+ * scif_peer_dev - representation of a peer SCIF device
+ *
+ * Peer devices show up as PCIe devices for the mgmt node but not the cards.
+ * The mgmt node discovers all the cards on the PCIe bus and informs the other
+ * cards about their peers. Upon notification of a peer a node adds a peer
+ * device to the peer bus to maintain symmetry in the way devices are
+ * discovered across all nodes in the SCIF network.
+ *
+ * @dev: underlying device
+ * @dnode: The destination node which this device will communicate with.
+ */
+struct scif_peer_dev {
+ struct device dev;
+ u8 dnode;
+};
+
+/**
+ * scif_client - representation of a SCIF client
+ * @name: client name
+ * @probe: client method called when a peer device is registered
+ * @remove: client method called when a peer device is unregistered
+ * @si: subsys_interface used internally for implementing SCIF clients
+ */
+struct scif_client {
+ const char *name;
+ void (*probe)(struct scif_peer_dev *spdev);
+ void (*remove)(struct scif_peer_dev *spdev);
+ struct subsys_interface si;
+};
+
+#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
+#define SCIF_REGISTER_FAILED ((off_t)-1)
+#define SCIF_MMAP_FAILED ((void *)-1)
+
+/**
+ * scif_open() - Create an endpoint
+ *
+ * Return:
+ * Upon successful completion, scif_open() returns an endpoint descriptor to
+ * be used in subsequent SCIF function calls to refer to that endpoint;
+ * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
+ * returned and errno is set to indicate the error; in kernel mode a NULL
+ * scif_epd_t is returned.
+ *
+ * Errors:
+ * ENOMEM - Insufficient kernel memory was available
+ */
+scif_epd_t scif_open(void);
+
+/**
+ * scif_bind() - Bind an endpoint to a port
+ * @epd: endpoint descriptor
+ * @pn: port number
+ *
+ * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
+ * local node. If pn is zero, a port number greater than or equal to
+ * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
+ * exactly one local port. Ports less than 1024, when requested, can only be
+ * bound by system (or root) processes or by processes executed by privileged users.
+ *
+ * Return:
+ * Upon successful completion, scif_bind() returns the port number to which epd
+ * is bound; otherwise in user mode -1 is returned and errno is set to
+ * indicate the error; in kernel mode the negative of one of the following
+ * errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINVAL - the endpoint or the port is already bound
+ * EISCONN - The endpoint is already connected
+ * ENOSPC - No port number available for assignment
+ * EACCES - The port requested is protected and the user is not the superuser
+ */
+int scif_bind(scif_epd_t epd, u16 pn);
+
+/**
+ * scif_listen() - Listen for connections on an endpoint
+ * @epd: endpoint descriptor
+ * @backlog: maximum pending connection requests
+ *
+ * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
+ * an endpoint that will be used to accept incoming connection requests. Once
+ * so marked, the endpoint is said to be in the listening state and may not be
+ * used as the endpoint of a connection.
+ *
+ * The endpoint, epd, must have been bound to a port.
+ *
+ * The backlog argument defines the maximum length to which the queue of
+ * pending connections for epd may grow. If a connection request arrives when
+ * the queue is full, the client may receive an error with an indication that
+ * the connection was refused.
+ *
+ * Return:
+ * Upon successful completion, scif_listen() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINVAL - the endpoint is not bound to a port
+ * EISCONN - The endpoint is already connected or listening
+ */
+int scif_listen(scif_epd_t epd, int backlog);
+
+/**
+ * scif_connect() - Initiate a connection on a port
+ * @epd: endpoint descriptor
+ * @dst: global id of port to which to connect
+ *
+ * The scif_connect() function requests the connection of endpoint epd to remote
+ * port dst. If the connection is successful, a peer endpoint, bound to dst, is
+ * created on node dst.node. On successful return, the connection is complete.
+ *
+ * If the endpoint epd has not already been bound to a port, scif_connect()
+ * will bind it to an unused local port.
+ *
+ * A connection is terminated when an endpoint of the connection is closed,
+ * either explicitly by scif_close(), or when a process that owns one of the
+ * endpoints of the connection is terminated.
+ *
+ * In user space, scif_connect() supports an asynchronous connection mode
+ * if the application has set the O_NONBLOCK flag on the endpoint via the
+ * fcntl() system call. Setting this flag will result in the calling process
+ * not waiting during scif_connect().
+ *
+ * Return:
+ * Upon successful completion, scif_connect() returns the port ID to which the
+ * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
+ * set to indicate the error; in kernel mode the negative of one of the
+ * following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNREFUSED - The destination was not listening for connections or refused
+ * the connection request
+ * EINVAL - dst.port is not a valid port ID
+ * EISCONN - The endpoint is already connected
+ * ENOMEM - No buffer space is available
+ * ENODEV - The destination node does not exist, or the node is lost or exited,
+ * but is not currently in the network since it may have crashed
+ * ENOSPC - No port number available for assignment
+ * EOPNOTSUPP - The endpoint is listening and cannot be connected
+ */
+int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
+
+/**
+ * scif_accept() - Accept a connection on an endpoint
+ * @epd: endpoint descriptor
+ * @peer: global id of port to which connected
+ * @newepd: new connected endpoint descriptor
+ * @flags: flags
+ *
+ * The scif_accept() call extracts the first connection request from the queue
+ * of pending connections for the port on which epd is listening. scif_accept()
+ * creates a new endpoint, bound to the same port as epd, and allocates a new
+ * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
+ * endpoint is connected to the endpoint through which the connection was
+ * requested. epd is unaffected by this call, and remains in the listening
+ * state.
+ *
+ * On successful return, peer holds the global port identifier (node id and
+ * local port number) of the port which requested the connection.
+ *
+ * A connection is terminated when an endpoint of the connection is closed,
+ * either explicitly by scif_close(), or when a process that owns one of the
+ * endpoints of the connection is terminated.
+ *
+ * The number of connections that can (subsequently) be accepted on epd is only
+ * limited by system resources (memory).
+ *
+ * The flags argument is formed by OR'ing together zero or more of the
+ * following values.
+ * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
+ * SCIF_ACCEPT_SYNC is not in flags, and no pending
+ * connections are present on the queue, scif_accept()
+ * fails with an EAGAIN error
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when there is a connection request. In kernel mode, the scif_poll()
+ * function may be used for this purpose. A readable event will be delivered
+ * when a connection is requested.
+ *
+ * Return:
+ * Upon successful completion, scif_accept() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
+ * accepted, or SCIF_ACCEPT_SYNC is not set and the remote node failed to complete
+ * its connection request
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINTR - Interrupted function
+ * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
+ * NULL, or newepd is NULL
+ * ENODEV - The requesting node is lost or exited, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOENT - Secondary part of epd registration failed
+ */
+int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
+ *newepd, int flags);
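Tying the listening-side calls together, a hedged kernel-mode sketch (port number, backlog and error handling are illustrative choices):

	/* Accept a single SCIF connection on an unreserved port. */
	#include <linux/scif.h>

	static int scif_server_once(scif_epd_t *out)
	{
		struct scif_port_id peer;
		scif_epd_t lep, nep;
		int err;

		lep = scif_open();
		if (!lep)			/* kernel mode: NULL on failure */
			return -ENOMEM;

		err = scif_bind(lep, 2000);	/* >= SCIF_PORT_RSVD, no privilege needed */
		if (err < 0)
			goto out_close;

		err = scif_listen(lep, 1);
		if (err < 0)
			goto out_close;

		err = scif_accept(lep, &peer, &nep, SCIF_ACCEPT_SYNC);
		if (err < 0)
			goto out_close;

		*out = nep;			/* caller is responsible for scif_close(nep) */
		err = 0;
	out_close:
		scif_close(lep);
		return err;
	}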
+
+/**
+ * scif_close() - Close an endpoint
+ * @epd: endpoint descriptor
+ *
+ * scif_close() closes an endpoint and performs necessary teardown of
+ * facilities associated with that endpoint.
+ *
+ * If epd is a listening endpoint then it will no longer accept connection
+ * requests on the port to which it is bound. Any pending connection requests
+ * are rejected.
+ *
+ * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
+ * which are in-process through epd or its peer endpoint will complete before
+ * scif_close() returns. Registered windows of the local and peer endpoints are
+ * released as if scif_unregister() was called against each window.
+ *
+ * Closing a SCIF endpoint does not affect local registered memory mapped by
+ * a SCIF endpoint on a remote node. The local memory remains mapped by the peer
+ * SCIF endpoint until explicitly removed by the peer calling munmap(..).
+ *
+ * If the peer endpoint's receive queue is not empty at the time that epd is
+ * closed, then the peer endpoint can be passed as the endpoint parameter to
+ * scif_recv() until the receive queue is empty.
+ *
+ * epd is freed and may no longer be accessed.
+ *
+ * Return:
+ * Upon successful completion, scif_close() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ */
+int scif_close(scif_epd_t epd);
+
+/**
+ * scif_send() - Send a message
+ * @epd: endpoint descriptor
+ * @msg: message buffer address
+ * @len: message length
+ * @flags: blocking mode flags
+ *
+ * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
+ * are copied from memory starting at address msg. On successful execution the
+ * return value of scif_send() is the number of bytes that were sent, and is
+ * zero if no bytes were sent because len was zero. scif_send() may be called
+ * only when the endpoint is in a connected state.
+ *
+ * If a scif_send() call is non-blocking, then it sends only those bytes which
+ * can be sent without waiting, up to a maximum of len bytes.
+ *
+ * If a scif_send() call is blocking, then it normally returns after sending
+ * all len bytes. If a blocking call is interrupted or the connection is
+ * reset, the call is considered successful if some bytes were sent or len is
+ * zero, otherwise the call is considered unsuccessful.
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when the send queue is not full. In kernel mode, the scif_poll() function
+ * may be used for this purpose.
+ *
+ * It is recommended that scif_send()/scif_recv() only be used for short
+ * control-type message communication between SCIF endpoints. The SCIF RMA
+ * APIs are expected to provide better performance for transfer sizes of
+ * 1024 bytes or longer for the current MIC hardware and software
+ * implementation.
+ *
+ * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
+ * is passed as the flags argument.
+ *
+ * Return:
+ * Upon successful completion, scif_send() returns the number of bytes sent;
+ * otherwise in user mode -1 is returned and errno is set to indicate the
+ * error; in kernel mode the negative of one of the following errors is
+ * returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid, or len is negative
+ * ENODEV - The remote node is lost or exited, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_send(scif_epd_t epd, void *msg, int len, int flags);
+
+/**
+ * scif_recv() - Receive a message
+ * @epd: endpoint descriptor
+ * @msg: message buffer address
+ * @len: message buffer length
+ * @flags: blocking mode flags
+ *
+ * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
+ * data are copied to memory starting at address msg. On successful execution
+ * the return value of scif_recv() is the number of bytes that were received,
+ * and is zero if no bytes were received because len was zero. scif_recv() may
+ * be called only when the endpoint is in a connected state.
+ *
+ * If a scif_recv() call is non-blocking, then it receives only those bytes
+ * which can be received without waiting, up to a maximum of len bytes.
+ *
+ * If a scif_recv() call is blocking, then it normally returns after receiving
+ * all len bytes. If the blocking call was interrupted due to a disconnection,
+ * subsequent calls to scif_recv() will copy all bytes received up to the point
+ * of disconnection.
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when data is available to be received. In kernel mode, the scif_poll()
+ * function may be used for this purpose.
+ *
+ * It is recommended that scif_send()/scif_recv() only be used for short
+ * control-type message communication between SCIF endpoints. The SCIF RMA
+ * APIs are expected to provide better performance for transfer sizes of
+ * 1024 bytes or longer for the current MIC hardware and software
+ * implementation.
+ *
+ * scif_recv() will block until the entire message is received if
+ * SCIF_RECV_BLOCK is passed as the flags argument.
+ *
+ * Return:
+ * Upon successful completion, scif_recv() returns the number of bytes
+ * received; otherwise in user mode -1 is returned and errno is set to
+ * indicate the error; in kernel mode the negative of one of the following
+ * errors is returned.
+ *
+ * Errors:
+ * EAGAIN - The destination node is returning from a low power state
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid, or len is negative
+ * ENODEV - The remote node is lost or exited, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
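A blocking echo sketch combining the two calls above (kernel mode; the buffer size is arbitrary):

	/* Receive one message and send it straight back. */
	static int scif_echo_once(scif_epd_t epd)
	{
		char buf[64];
		int n;

		n = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
		if (n < 0)
			return n;	/* kernel mode: negative errno */

		return scif_send(epd, buf, n, SCIF_SEND_BLOCK);
	}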
+
+/**
+ * scif_register() - Mark a memory region for remote access.
+ * @epd: endpoint descriptor
+ * @addr: starting virtual address
+ * @len: length of range
+ * @offset: offset of window
+ * @prot_flags: read/write protection flags
+ * @map_flags: mapping flags
+ *
+ * The scif_register() function opens a window, a range of whole pages of the
+ * registered address space of the endpoint epd, starting at offset po and
+ * continuing for len bytes. The value of po, further described below, is a
+ * function of the parameters offset and len, and the value of map_flags. Each
+ * page of the window represents the physical memory page which backs the
+ * corresponding page of the range of virtual address pages starting at addr
+ * and continuing for len bytes. addr and len are constrained to be multiples
+ * of the page size. A successful scif_register() call returns po.
+ *
+ * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
+ * exactly, and offset is constrained to be a multiple of the page size. The
+ * mapping established by scif_register() will not replace any existing
+ * registration; an error is returned if any page within the range [offset,
+ * offset + len - 1] intersects an existing window.
+ *
+ * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
+ * implementation-defined manner to arrive at po. The po value so chosen will
+ * be an area of the registered address space that the implementation deems
+ * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
+ * granting the implementation complete freedom in selecting po, subject to
+ * constraints described below. A non-zero value of offset is taken to be a
+ * suggestion of an offset near which the mapping should be placed. When the
+ * implementation selects a value for po, it does not replace any extant
+ * window. In all cases, po will be a multiple of the page size.
+ *
+ * The physical pages which are so represented by a window are available for
+ * access in calls to mmap(), scif_readfrom(), scif_writeto(),
+ * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
+ * physical pages represented by the window will not be reused by the memory
+ * subsystem for any other purpose. Note that the same physical page may be
+ * represented by multiple windows.
+ *
+ * Subsequent operations which change the memory pages to which virtual
+ * addresses are mapped (such as mmap(), munmap()) have no effect on
+ * existing windows.
+ *
+ * If the process will fork(), it is recommended that the registered
+ * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
+ * problems due to copy-on-write semantics.
+ *
+ * The prot_flags argument is formed by OR'ing together one or more of the
+ * following values.
+ * SCIF_PROT_READ - allow read operations from the window
+ * SCIF_PROT_WRITE - allow write operations to the window
+ *
+ * Return:
+ * Upon successful completion, scif_register() returns the offset at which the
+ * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
+ * is (off_t)-1) is returned and errno is set to indicate the error; in
+ * kernel mode the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
+ * [offset, offset + len - 1] are already registered
+ * EAGAIN - The mapping could not be performed due to lack of resources
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED
+ * is set in map_flags and offset is not a multiple of the page size, or addr
+ * is not a multiple of the page size, or len is not a multiple of the page
+ * size or is 0, or offset is negative
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+ int prot_flags, int map_flags);
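+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * a kernel-mode caller registers one page of page-aligned memory for
+ * read/write RMA access at a fixed offset. "epd" is assumed to be an
+ * already connected endpoint; a negative return is a negated errno, e.g.
+ * -EADDRINUSE if the fixed offset is already registered.
+ *
+ *	void *buf = (void *)__get_free_page(GFP_KERNEL);
+ *	off_t po;
+ *
+ *	po = scif_register(epd, buf, PAGE_SIZE, 0x4000,
+ *			   SCIF_PROT_READ | SCIF_PROT_WRITE,
+ *			   SCIF_MAP_FIXED);
+ *	if (po < 0)
+ *		pr_err("scif_register failed: %ld\n", po);
+ */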
+
+/**
+ * scif_unregister() - Release a memory region previously marked for remote access.
+ * @epd: endpoint descriptor
+ * @offset: start of range to unregister
+ * @len: length of range to unregister
+ *
+ * The scif_unregister() function closes those previously registered windows
+ * which are entirely within the range [offset, offset + len - 1]. It is an
+ * error to specify a range which intersects only a subrange of a window.
+ *
+ * On a successful return, pages within the window may no longer be specified
+ * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
+ * scif_vwriteto(), scif_get_pages(), and scif_fence_signal(). The window,
+ * however, continues to exist until all previous references against it are
+ * removed. A window is referenced if there is a mapping to it created by
+ * mmap(), or if scif_get_pages() was called against the window
+ * (and the pages have not been returned via scif_put_pages()). A window is
+ * also referenced while an RMA, in which some range of the window is a source
+ * or destination, is in progress. Finally, a window is referenced while some
+ * offset in that window was specified to scif_fence_signal(), and the RMAs
+ * marked by that call to scif_fence_signal() have not completed. While a
+ * window is in this state, its registered address space pages are not
+ * available for use in a new registered window.
+ *
+ * When all such references to the window have been removed, its references to
+ * all the physical pages which it represents are removed. Similarly, the
+ * registered address space pages of the window become available for
+ * registration in a new window.
+ *
+ * Return:
+ * Upon successful completion, scif_unregister() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned. In the event of an
+ * error, no windows are unregistered.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
+ * window, or offset is negative
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
+
+/**
+ * scif_readfrom() - Copy from a remote address space
+ * @epd: endpoint descriptor
+ * @loffset: offset in local registered address space to
+ * which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space
+ * from which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_readfrom() copies len bytes from the remote registered address space of
+ * the peer of endpoint epd, starting at the offset roffset to the local
+ * registered address space of epd, starting at the offset loffset.
+ *
+ * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
+ * roffset + len - 1] must be within some registered window or windows of the
+ * local and remote nodes. A range may intersect multiple registered windows,
+ * but only if those windows are contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations
+ * complete is non-deterministic. The synchronization functions,
+ * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
+ * to synchronize to the completion of asynchronous RMA operations on the
+ * same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * The optimal DMA performance will likely be realized if both
+ * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if loffset and roffset are not
+ * cacheline aligned but are separated by some multiple of 64. The lowest level
+ * of performance is likely if loffset and roffset are not separated by a
+ * multiple of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress; this yields the best
+ * performance by avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_readfrom() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or the range [roffset, roffset + len - 1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t
+ roffset, int rma_flags);
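+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * a synchronous DMA pull of "len" bytes from the peer's registered window
+ * into the local registered window. Both offsets are assumed to lie within
+ * valid windows and, for best DMA throughput, to be 64-byte aligned.
+ *
+ *	int err = scif_readfrom(epd, loffset, len, roffset, SCIF_RMA_SYNC);
+ *
+ *	if (err < 0)
+ *		pr_err("scif_readfrom failed: %d\n", err);
+ */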
+
+/**
+ * scif_writeto() - Copy to a remote address space
+ * @epd: endpoint descriptor
+ * @loffset: offset in local registered address space
+ * from which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to
+ * which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_writeto() copies len bytes from the local registered address space of
+ * epd, starting at the offset loffset to the remote registered address space
+ * of the peer of endpoint epd, starting at the offset roffset.
+ *
+ * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
+ * roffset + len - 1] must be within some registered window or windows of the
+ * local and remote nodes. A range may intersect multiple registered windows,
+ * but only if those windows are contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations
+ * complete is non-deterministic. The synchronization functions,
+ * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
+ * to synchronize to the completion of asynchronous RMA operations on the
+ * same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * The optimal DMA performance will likely be realized if both
+ * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if loffset and roffset are not cacheline
+ * aligned but are separated by some multiple of 64. The lowest level of
+ * performance is likely if loffset and roffset are not separated by a multiple
+ * of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress; this yields the best
+ * performance by avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_writeto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or the range [roffset, roffset + len - 1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
+ roffset, int rma_flags);
+
+/**
+ * scif_vreadfrom() - Copy from a remote address space
+ * @epd: endpoint descriptor
+ * @addr: address to which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space
+ * from which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_vreadfrom() copies len bytes from the remote registered address
+ * space of the peer of endpoint epd, starting at the offset roffset, to local
+ * memory, starting at addr.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote node. The range may
+ * intersect multiple registered windows, but only if those windows are
+ * contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations
+ * complete is non-deterministic. The synchronization functions,
+ * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
+ * to synchronize to the completion of asynchronous RMA operations on the
+ * same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
+ * the specified local memory range may remain in a pinned state even after
+ * the specified transfer completes. This may reduce overhead if some or all of
+ * the same virtual address range is referenced in a subsequent call of
+ * scif_vreadfrom() or scif_vwriteto().
+ *
+ * The optimal DMA performance will likely be realized if both
+ * addr and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if addr and roffset are not
+ * cacheline aligned but are separated by some multiple of 64. The lowest level
+ * of performance is likely if addr and roffset are not separated by a
+ * multiple of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_USECACHE - enable registration caching
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress; this yields the best
+ * performance by avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for
+ * the registered address space of the peer of epd
+ */
+int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
+ int rma_flags);
+
+/**
+ * scif_vwriteto() - Copy to a remote address space
+ * @epd: endpoint descriptor
+ * @addr: address from which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to
+ * which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
+ * the remote registered address space of the peer of endpoint epd, starting at
+ * the offset roffset.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote node. The range may intersect
+ * multiple registered windows, but only if those windows are contiguous in the
+ * registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations
+ * complete is non-deterministic. The synchronization functions,
+ * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
+ * to synchronize to the completion of asynchronous RMA operations on the
+ * same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
+ * the specified local memory range may remain in a pinned state even after
+ * the specified transfer completes. This may reduce overhead if some or all of
+ * the same virtual address range is referenced in a subsequent call of
+ * scif_vreadfrom() or scif_vwriteto().
+ *
+ * The optimal DMA performance will likely be realized if both
+ * addr and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if addr and roffset are not cacheline
+ * aligned but are separated by some multiple of 64. The lowest level of
+ * performance is likely if addr and roffset are not separated by a multiple
+ * of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_USECACHE - allow registration caching
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress; this yields the best
+ * performance by avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for
+ * the registered address space of the peer of epd
+ */
+int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
+ int rma_flags);
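+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * pushing a buffer that will be reused for many transfers. SCIF_RMA_USECACHE
+ * asks the implementation to keep the backing pages pinned between calls,
+ * and SCIF_RMA_ORDERED makes the final cacheline become visible last, so the
+ * peer can poll it as a completion marker. "buf" and "roffset" are assumed
+ * valid as described above.
+ *
+ *	int err = scif_vwriteto(epd, buf, PAGE_SIZE, roffset,
+ *				SCIF_RMA_USECACHE | SCIF_RMA_ORDERED);
+ */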
+
+/**
+ * scif_fence_mark() - Mark previously issued RMAs
+ * @epd: endpoint descriptor
+ * @flags: control flags
+ * @mark: marked value returned as output.
+ *
+ * scif_fence_mark() returns after marking the current set of all uncompleted
+ * RMAs initiated through the endpoint epd or the current set of all
+ * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
+ * marked with a value returned at mark. The application may subsequently call
+ * scif_fence_wait(), passing the value returned at mark, to await completion
+ * of all RMAs so marked.
+ *
+ * The flags argument has exactly one of the following values.
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
+ * epd are marked
+ * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
+ * of endpoint epd are marked
+ *
+ * Return:
+ * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENOMEM - Insufficient kernel memory was available
+ */
+int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
+
+/**
+ * scif_fence_wait() - Wait for completion of marked RMAs
+ * @epd: endpoint descriptor
+ * @mark: mark request
+ *
+ * scif_fence_wait() returns after all RMAs marked with mark have completed.
+ * The value passed in mark must have been obtained in a previous call to
+ * scif_fence_mark().
+ *
+ * Return:
+ * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENOMEM - Insufficient kernel memory was available
+ */
+int scif_fence_wait(scif_epd_t epd, int mark);
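+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * the mark/wait pattern for draining asynchronous RMAs issued through this
+ * endpoint before reusing the source buffer.
+ *
+ *	int mark, err;
+ *
+ *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
+ *	if (!err)
+ *		err = scif_fence_wait(epd, mark);
+ */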
+
+/**
+ * scif_fence_signal() - Request a memory update on completion of RMAs
+ * @epd: endpoint descriptor
+ * @loff: local offset
+ * @lval: local value to write at loff
+ * @roff: remote offset
+ * @rval: remote value to write at roff
+ * @flags: flags
+ *
+ * scif_fence_signal() returns after marking the current set of all uncompleted
+ * RMAs initiated through the endpoint epd or marking the current set of all
+ * uncompleted RMAs initiated through the peer of endpoint epd.
+ *
+ * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
+ * marked set, lval is written to memory at the address corresponding to offset
+ * loff in the local registered address space of epd. loff must be within a
+ * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
+ * of the RMAs in the marked set, rval is written to memory at the address
+ * corresponding to offset roff in the remote registered address space of epd.
+ * roff must be within a remote registered window of the peer of epd. Note
+ * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
+ *
+ * The flags argument is formed by OR'ing together the following.
+ * Exactly one of the following values.
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
+ * epd are marked
+ * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
+ * of endpoint epd are marked
+ * One or more of the following values.
+ * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
+ * memory at the address corresponding to offset loff in the local
+ * registered address space of epd.
+ * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
+ * memory at the address corresponding to offset roff in the remote
+ * registered address space of epd.
+ *
+ * Return:
+ * Upon successful completion, scif_fence_signal() returns 0; otherwise in
+ * user mode -1 is returned and errno is set to indicate the error; in kernel
+ * mode the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - loff is invalid for the registered address space of epd, or roff is
+ * invalid for the registered address space of the peer of epd
+ */
+int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
+ u64 rval, int flags);
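+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * writing a completion flag into the local registered window once all RMAs
+ * initiated through epd have finished; a local reader can then poll the
+ * DWORD at loff. loff is assumed to be 4-byte aligned and inside a
+ * registered window; roff and rval are presumably ignored here since
+ * SCIF_SIGNAL_REMOTE is not set.
+ *
+ *	int err = scif_fence_signal(epd, loff, 1, 0, 0,
+ *				    SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL);
+ */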
+
+/**
+ * scif_get_node_ids() - Return information about online nodes
+ * @nodes: array in which to return online node IDs
+ * @len: number of entries in the nodes array
+ * @self: address to place the node ID of the local node
+ *
+ * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
+ * nodes in the SCIF network. If there is not enough space in nodes, as
+ * indicated by the len parameter, only len node IDs are returned in nodes. The
+ * return value of scif_get_node_ids() is the total number of nodes currently in
+ * the SCIF network. By checking the return value against the len parameter,
+ * the user may determine if enough space for nodes was allocated.
+ *
+ * The node ID of the local node is returned at self.
+ *
+ * Return:
+ * Upon successful completion, scif_get_node_ids() returns the actual number of
+ * online nodes in the SCIF network including 'self'; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode no
+ * errors are returned.
+ */
+int scif_get_node_ids(u16 *nodes, int len, u16 *self);
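+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * comparing the return value against the array length to detect a
+ * too-small nodes array.
+ *
+ *	u16 self, nodes[8];
+ *	int total = scif_get_node_ids(nodes, 8, &self);
+ *
+ *	if (total > 8)
+ *		pr_info("only 8 of %d node IDs returned\n", total);
+ */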
+
+/**
+ * scif_pin_pages() - Pin a set of pages
+ * @addr: Virtual address of range to pin
+ * @len: Length of range to pin
+ * @prot_flags: Page protection flags
+ * @map_flags: Page classification flags
+ * @pinned_pages: Handle to pinned pages
+ *
+ * scif_pin_pages() pins (locks in physical memory) the physical pages which
+ * back the range of virtual address pages starting at addr and continuing for
+ * len bytes. addr and len are constrained to be multiples of the page size. A
+ * successful scif_pin_pages() call returns a handle to pinned_pages which may
+ * be used in subsequent calls to scif_register_pinned_pages().
+ *
+ * The pages will remain pinned as long as there is a reference against the
+ * scif_pinned_pages_t value returned by scif_pin_pages() and until
+ * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A
+ * reference is added to a scif_pinned_pages_t value each time a window is
+ * created by calling scif_register_pinned_pages() and passing the
+ * scif_pinned_pages_t value. A reference is removed from a
+ * scif_pinned_pages_t value each time such a window is deleted.
+ *
+ * Subsequent operations which change the memory pages to which virtual
+ * addresses are mapped (such as mmap(), munmap()) have no effect on the
+ * scif_pinned_pages_t value or windows created against it.
+ *
+ * If the process will fork(), it is recommended that the registered
+ * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
+ * problems due to copy-on-write semantics.
+ *
+ * The prot_flags argument is formed by OR'ing together one or more of the
+ * following values.
+ * SCIF_PROT_READ - allow read operations against the pages
+ * SCIF_PROT_WRITE - allow write operations against the pages
+ * The map_flags argument can be set to SCIF_MAP_KERNEL to interpret addr as a
+ * kernel space address. By default, addr is interpreted as a user space
+ * address.
+ *
+ * Return:
+ * Upon successful completion, scif_pin_pages() returns 0; otherwise the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EINVAL - prot_flags is invalid, map_flags is invalid, or addr or len is
+ * not a multiple of the page size
+ * ENOMEM - Not enough space
+ */
+int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
+ scif_pinned_pages_t *pinned_pages);
+
+/**
+ * scif_unpin_pages() - Unpin a set of pages
+ * @pinned_pages: Handle to pinned pages to be unpinned
+ *
+ * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
+ * windows against pinned_pages. The physical pages represented by pinned_pages
+ * will remain pinned until all windows previously registered against
+ * pinned_pages are deleted (the window is scif_unregister()'d and all
+ * references to the window are removed; see scif_unregister()).
+ *
+ * pinned_pages must have been obtained from a previous call to scif_pin_pages().
+ * After calling scif_unpin_pages(), it is an error to pass pinned_pages to
+ * scif_register_pinned_pages().
+ *
+ * Return:
+ * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EINVAL - pinned_pages is not valid
+ */
+int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
+
+/**
+ * scif_register_pinned_pages() - Mark a memory region for remote access.
+ * @epd: endpoint descriptor
+ * @pinned_pages: Handle to pinned pages
+ * @offset: Registered address space offset
+ * @map_flags: Flags which control where pages are mapped
+ *
+ * The scif_register_pinned_pages() function opens a window, a range of whole
+ * pages of the registered address space of the endpoint epd, starting at
+ * offset po. The value of po, further described below, is a function of the
+ * parameters offset and pinned_pages, and the value of map_flags. Each page of
+ * the window represents a corresponding physical memory page of the range
+ * represented by pinned_pages; the length of the window is the same as the
+ * length of the range represented by pinned_pages. A successful
+ * scif_register_pinned_pages() call returns po as the return value.
+ *
+ * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
+ * exactly, and offset is constrained to be a multiple of the page size. The
+ * mapping established by scif_register_pinned_pages() will not replace any
+ * existing registration; an error is returned if any page of the new window
+ * would intersect an existing window.
+ *
+ * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
+ * implementation-defined manner to arrive at po. The po so chosen will be an
+ * area of the registered address space that the implementation deems suitable
+ * for a mapping of the required size. An offset value of 0 is interpreted as
+ * granting the implementation complete freedom in selecting po, subject to
+ * constraints described below. A non-zero value of offset is taken to be a
+ * suggestion of an offset near which the mapping should be placed. When the
+ * implementation selects a value for po, it does not replace any extant
+ * window. In all cases, po will be a multiple of the page size.
+ *
+ * The physical pages which are so represented by a window are available for
+ * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(),
+ * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
+ * physical pages represented by the window will not be reused by the memory
+ * subsystem for any other purpose. Note that the same physical page may be
+ * represented by multiple windows.
+ *
+ * Windows created by scif_register_pinned_pages() are unregistered by
+ * scif_unregister().
+ *
+ * The map_flags argument can be set to SCIF_MAP_FIXED, which requests that
+ * the window be placed exactly at the given offset.
+ *
+ * Return:
+ * Upon successful completion, scif_register_pinned_pages() returns the offset
+ * at which the mapping was placed (po); otherwise the negative of one of the
+ * following errors is returned.
+ *
+ * Errors:
+ * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window
+ * would intersect an existing window
+ * EAGAIN - The mapping could not be performed due to lack of resources
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and
+ * offset is not a multiple of the page size, or offset is negative
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+off_t scif_register_pinned_pages(scif_epd_t epd,
+ scif_pinned_pages_t pinned_pages,
+ off_t offset, int map_flags);
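+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * the pin-then-register pattern for a page-aligned kernel buffer "kbuf".
+ * Once the window is created, the pinned-pages handle can be released; the
+ * pages stay pinned until the window itself is unregistered and all
+ * references to it drop.
+ *
+ *	scif_pinned_pages_t pp;
+ *	off_t po;
+ *	int err;
+ *
+ *	err = scif_pin_pages(kbuf, PAGE_SIZE,
+ *			     SCIF_PROT_READ | SCIF_PROT_WRITE,
+ *			     SCIF_MAP_KERNEL, &pp);
+ *	if (!err) {
+ *		po = scif_register_pinned_pages(epd, pp, 0, 0);
+ *		scif_unpin_pages(pp);
+ *	}
+ */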
+
+/**
+ * scif_get_pages() - Add references to remote registered pages
+ * @epd: endpoint descriptor
+ * @offset: remote registered offset
+ * @len: length of range of pages
+ * @pages: returned scif_range structure
+ *
+ * scif_get_pages() returns the addresses of the physical pages represented by
+ * those pages of the registered address space of the peer of epd, starting at
+ * offset and continuing for len bytes. offset and len are constrained to be
+ * multiples of the page size.
+ *
+ * All of the pages in the specified range [offset, offset + len - 1] must be
+ * within a single window of the registered address space of the peer of epd.
+ *
+ * The addresses are returned as a virtually contiguous array pointed to by the
+ * phys_addr component of the scif_range structure whose address is returned in
+ * pages. The nr_pages component of scif_range is the length of the array. The
+ * prot_flags component of scif_range holds the protection flag value passed
+ * when the pages were registered.
+ *
+ * Each physical page whose address is returned by scif_get_pages() remains
+ * available and will not be released for reuse until the scif_range structure
+ * is returned in a call to scif_put_pages(). The scif_range structure
+ * returned by scif_get_pages() must not be modified.
+ *
+ * It is an error to call scif_close() on an endpoint while any scif_range
+ * structure obtained from that endpoint has not yet been returned via
+ * scif_put_pages().
+ *
+ * Return:
+ * Upon successful completion, scif_get_pages() returns 0; otherwise the
+ * negative of one of the following errors is returned.
+ * Errors:
+ * ECONNRESET - Connection reset by peer.
+ * EINVAL - offset is not a multiple of the page size, or offset is negative, or
+ * len is not a multiple of the page size
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid
+ * for the registered address space of the peer of epd
+ */
+int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
+ struct scif_range **pages);
+
+/**
+ * scif_put_pages() - Remove references from remote registered pages
+ * @pages: pages to be returned
+ *
+ * scif_put_pages() releases a scif_range structure previously obtained by
+ * calling scif_get_pages(). The physical pages represented by pages may
+ * be reused when the window which represented those pages is unregistered.
+ * Therefore, those pages must not be accessed after calling scif_put_pages().
+ *
+ * Return:
+ * Upon successful completion, scif_put_pages() returns 0; otherwise the
+ * negative of one of the following errors is returned.
+ * Errors:
+ * EINVAL - pages does not point to a valid scif_range structure, or
+ * the scif_range structure pointed to by pages was already returned
+ * ENODEV - The remote node is lost, or it existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_put_pages(struct scif_range *pages);
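+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * borrowing the physical addresses behind one page of a peer window and
+ * returning them when done. use_phys_pages() stands in for whatever the
+ * caller does with the addresses; per the documentation above, the
+ * phys_addr array and nr_pages count come from the returned scif_range.
+ *
+ *	struct scif_range *range;
+ *	int err = scif_get_pages(epd, roffset, PAGE_SIZE, &range);
+ *
+ *	if (!err) {
+ *		use_phys_pages(range->phys_addr, range->nr_pages);
+ *		scif_put_pages(range);
+ *	}
+ */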
+
+/**
+ * scif_poll() - Wait for some event on an endpoint
+ * @epds: Array of endpoint descriptors
+ * @nepds: Length of epds
+ * @timeout: Upper limit on time for which scif_poll() will block
+ *
+ * scif_poll() waits for one of a set of endpoints to become ready to perform
+ * an I/O operation.
+ *
+ * The epds argument specifies the endpoint descriptors to be examined and the
+ * events of interest for each endpoint descriptor. epds is a pointer to an
+ * array with one member for each open endpoint descriptor of interest.
+ *
+ * The number of items in the epds array is specified in nepds. The epd field
+ * of scif_pollepd is an endpoint descriptor of an open endpoint. The field
+ * events is a bitmask specifying the events which the application is
+ * interested in. The field revents is an output parameter, filled by the
+ * kernel with the events that actually occurred. The bits returned in revents
+ * can include any of those specified in events, or one of the values POLLERR,
+ * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events
+ * field, and will be set in the revents field whenever the corresponding
+ * condition is true.)
+ *
+ * If none of the events requested (and no error) has occurred for any of the
+ * endpoint descriptors, then scif_poll() blocks until one of the events occurs.
+ *
+ * The timeout argument specifies an upper limit on the time for which
+ * scif_poll() will block, in milliseconds. Specifying a negative value in
+ * timeout means an infinite timeout.
+ *
+ * The following bits may be set in events and returned in revents.
+ * POLLIN - Data may be received without blocking. For a connected
+ * endpoint, this means that scif_recv() may be called without blocking. For a
+ * listening endpoint, this means that scif_accept() may be called without
+ * blocking.
+ * POLLOUT - Data may be sent without blocking. For a connected endpoint, this
+ * means that scif_send() may be called without blocking. POLLOUT may also be
+ * used to block waiting for a non-blocking connect to complete. This bit value
+ * has no meaning for a listening endpoint and is ignored if specified.
+ *
+ * The following bits are only returned in revents, and are ignored if set in
+ * events.
+ * POLLERR - An error occurred on the endpoint
+ * POLLHUP - The connection to the peer endpoint was disconnected
+ * POLLNVAL - The specified endpoint descriptor is invalid.
+ *
+ * Return:
+ * Upon successful completion, scif_poll() returns a non-negative value. A
+ * positive value indicates the total number of endpoint descriptors that have
+ * been selected (that is, endpoint descriptors for which the revents member is
+ * non-zero). A value of 0 indicates that the call timed out and no endpoint
+ * descriptors have been selected. Otherwise in user mode -1 is returned and
+ * errno is set to indicate the error; in kernel mode the negative of one of
+ * the following errors is returned.
+ *
+ * Errors:
+ * EINTR - A signal occurred before any requested event
+ * EINVAL - The nepds argument is greater than {OPEN_MAX}
+ * ENOMEM - There was no space to allocate file descriptor tables
+ */
+int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout);
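+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * waiting up to two seconds for a connected endpoint to become readable.
+ * handle_incoming() stands in for the caller's receive path.
+ *
+ *	struct scif_pollepd pe = { .epd = epd, .events = POLLIN };
+ *	int n = scif_poll(&pe, 1, 2000);
+ *
+ *	if (n > 0 && (pe.revents & POLLIN))
+ *		handle_incoming(epd);
+ */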
+
+/**
+ * scif_client_register() - Register a SCIF client
+ * @client: client to be registered
+ *
+ * scif_client_register() registers a SCIF client. The probe() method
+ * of the client is called when SCIF peer devices come online and the
+ * remove() method is called when the peer devices disappear.
+ *
+ * Return:
+ * Upon successful completion, scif_client_register() returns a non-negative
+ * value. Otherwise the return value is the same as subsys_interface_register()
+ * in the kernel.
+ */
+int scif_client_register(struct scif_client *client);
+
+/**
+ * scif_client_unregister() - Unregister a SCIF client
+ * @client: client to be unregistered
+ *
+ * scif_client_unregister() unregisters a SCIF client.
+ *
+ * Return:
+ * None
+ */
+void scif_client_unregister(struct scif_client *client);
+
+#endif /* __SCIF_H__ */
diff --git a/kernel/include/linux/scpi_protocol.h b/kernel/include/linux/scpi_protocol.h
new file mode 100644
index 000000000..72ce932c6
--- /dev/null
+++ b/kernel/include/linux/scpi_protocol.h
@@ -0,0 +1,78 @@
+/*
+ * SCPI Message Protocol driver header
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+
+struct scpi_opp {
+ u32 freq;
+ u32 m_volt;
+} __packed;
+
+struct scpi_dvfs_info {
+ unsigned int count;
+ unsigned int latency; /* in nanoseconds */
+ struct scpi_opp *opps;
+};
+
+enum scpi_sensor_class {
+ TEMPERATURE,
+ VOLTAGE,
+ CURRENT,
+ POWER,
+};
+
+struct scpi_sensor_info {
+ u16 sensor_id;
+ u8 class;
+ u8 trigger_type;
+ char name[20];
+} __packed;
+
+/**
+ * struct scpi_ops - represents the various operations provided
+ * by SCP through SCPI message protocol
+ * @get_version: returns the major and minor revision on the SCPI
+ * message protocol
+ * @clk_get_range: gets the clock range limits (min and max, in Hz)
+ * @clk_get_val: gets the clock value (in Hz)
+ * @clk_set_val: sets the clock value; setting it to 0 will disable the
+ * clock (if supported)
+ * @dvfs_get_idx: gets the Operating Point of the given power domain.
+ * OPP is an index into the list returned by @dvfs_get_info
+ * @dvfs_set_idx: sets the Operating Point of the given power domain.
+ * OPP is an index into the list returned by @dvfs_get_info
+ * @dvfs_get_info: returns the DVFS capabilities of the given power
+ * domain. It includes the OPP list and the latency information
+ * @sensor_get_capability: gets the number of sensors exported by the SCP
+ * @sensor_get_info: gets the description of the sensor with the given id
+ * @sensor_get_value: reads the current value of the sensor with the given id
+ */
+struct scpi_ops {
+ u32 (*get_version)(void);
+ int (*clk_get_range)(u16, unsigned long *, unsigned long *);
+ unsigned long (*clk_get_val)(u16);
+ int (*clk_set_val)(u16, unsigned long);
+ int (*dvfs_get_idx)(u8);
+ int (*dvfs_set_idx)(u8, u8);
+ struct scpi_dvfs_info *(*dvfs_get_info)(u8);
+ int (*sensor_get_capability)(u16 *sensors);
+ int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
+ int (*sensor_get_value)(u16, u32 *);
+};
+
+#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
+struct scpi_ops *get_scpi_ops(void);
+#else
+static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
+#endif
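+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * a consumer driver fetching the ops table and reading a sensor. A NULL
+ * return means the SCPI driver is not reachable or not ready; "sensor_id"
+ * is a hypothetical u16 sensor identifier.
+ *
+ *	struct scpi_ops *ops = get_scpi_ops();
+ *	u32 val;
+ *
+ *	if (ops && !ops->sensor_get_value(sensor_id, &val))
+ *		pr_info("sensor %u reads %u\n", sensor_id, val);
+ */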
diff --git a/kernel/include/linux/seccomp.h b/kernel/include/linux/seccomp.h
index a19ddacda..2296e6b2f 100644
--- a/kernel/include/linux/seccomp.h
+++ b/kernel/include/linux/seccomp.h
@@ -78,7 +78,7 @@ static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
static inline int seccomp_mode(struct seccomp *s)
{
- return 0;
+ return SECCOMP_MODE_DISABLED;
}
#endif /* CONFIG_SECCOMP */
@@ -95,4 +95,15 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
return;
}
#endif /* CONFIG_SECCOMP_FILTER */
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+extern long seccomp_get_filter(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
+#else
+static inline long seccomp_get_filter(struct task_struct *task,
+ unsigned long n, void __user *data)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
#endif /* _LINUX_SECCOMP_H */
diff --git a/kernel/include/linux/security.h b/kernel/include/linux/security.h
index 18264ea9e..2f4c1f7aa 100644
--- a/kernel/include/linux/security.h
+++ b/kernel/include/linux/security.h
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/mm.h>
struct linux_binprm;
struct cred;
@@ -43,7 +44,6 @@ struct file;
struct vfsmount;
struct path;
struct qstr;
-struct nameidata;
struct iattr;
struct fown_struct;
struct file_operations;
@@ -54,9 +54,6 @@ struct xattr;
struct xfrm_sec_ctx;
struct mm_struct;
-/* Maximum number of letters for an LSM name string */
-#define SECURITY_NAME_MAX 10
-
/* If capable should audit the security request */
#define SECURITY_CAP_NOAUDIT 0
#define SECURITY_CAP_AUDIT 1
@@ -69,10 +66,7 @@ struct audit_krule;
struct user_namespace;
struct timezone;
-/*
- * These functions are in security/capability.c and are used
- * as the default capabilities functions
- */
+/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, int audit);
extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
@@ -114,10 +108,6 @@ struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
-
-void reset_security_ops(void);
-
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
@@ -188,1581 +178,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
opts->num_mnt_opts = 0;
}
-/**
- * struct security_operations - main security structure
- *
- * Security module identifier.
- *
- * @name:
- * A string that acts as a unique identifier for the LSM with max number
- * of characters = SECURITY_NAME_MAX.
- *
- * Security hooks for program execution operations.
- *
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check the @bprm->security value which is set in the
- * preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_committing_creds:
- * Prepare to install the new security attributes of a process being
- * transformed by an execve operation, based on the old credentials
- * pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
- * @bprm_committed_creds:
- * Tidy up after the installation of the new security attributes of a
- * process being transformed by an execve operation. The new credentials
- * have, by this point, been set to @current->cred. @bprm points to the
- * linux_binprm structure. This hook is a good place to perform state
- * changes on the process such as clearing out non-inheritable signal
- * state. This is called immediately after commit_creds().
- * @bprm_secureexec:
- * Return a boolean value (0 or 1) indicating whether a "secure exec"
- * is required. The flag is passed in the auxiliary table
- * on the initial stack to the ELF interpreter to indicate whether libc
- * should enable secure mode.
- * @bprm contains the linux_binprm structure.
- *
- * Security hooks for filesystem operations.
- *
- * @sb_alloc_security:
- * Allocate and attach a security structure to the sb->s_security field.
- * The s_security field is initialized to NULL when the structure is
- * allocated.
- * @sb contains the super_block structure to be modified.
- * Return 0 if operation was successful.
- * @sb_free_security:
- * Deallocate and clear the sb->s_security field.
- * @sb contains the super_block structure to be modified.
- * @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @mnt
- * mountpoint.
- * @dentry is a handle on the superblock for the filesystem.
- * Return 0 if permission is granted.
- * @sb_mount:
- * Check permission before an object specified by @dev_name is mounted on
- * the mount point named by @nd. For an ordinary mount, @dev_name
- * identifies a device if the file system type requires a device. For a
- * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
- * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
- * pathname of the object being mounted.
- * @dev_name contains the name for object being mounted.
- * @path contains the path for mount point object.
- * @type contains the filesystem type.
- * @flags contains the mount flags.
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @type the type of filesystem being mounted.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
- * @sb_remount:
- * Extracts security system specific mount options and verifies no changes
- * are being made to those options.
- * @sb superblock being remounted
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_umount:
- * Check permission before the @mnt file system is unmounted.
- * @mnt contains the mounted file system.
- * @flags contains the unmount flags, e.g. MNT_FORCE.
- * Return 0 if permission is granted.
- * @sb_pivotroot:
- * Check permission before pivoting the root filesystem.
- * @old_path contains the path for the new location of the current root (put_old).
- * @new_path contains the path for the new root (new_root).
- * Return 0 if permission is granted.
- * @sb_set_mnt_opts:
- * Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
- * @sb_clone_mnt_opts:
- * Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs filled in
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
- * @dentry_init_security:
- * Compute a context for a dentry as the inode is not yet available
- * since NFSv4 has no label backed by an EA anyway.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @ctx pointer to place the pointer to the resulting context in.
- * @ctxlen point to place the length of the resulting context.
- *
- *
- * Security hooks for inode operations.
- *
- * @inode_alloc_security:
- * Allocate and attach a security structure to @inode->i_security. The
- * i_security field is initialized to NULL when the inode structure is
- * allocated.
- * @inode contains the inode structure.
- * Return 0 if operation was successful.
- * @inode_free_security:
- * @inode contains the inode structure.
- * Deallocate the inode security structure and set @inode->i_security to
- * NULL.
- * @inode_init_security:
- * Obtain the security attribute name suffix and value to set on a newly
- * created inode and set up the incore security field for the new inode.
- * This hook is called by the fs code as part of the inode creation
- * transaction and provides for atomic labeling of the inode, unlike
- * the post_create/mkdir/... hooks called by the VFS. The hook function
- * is expected to allocate the name and value via kmalloc, with the caller
- * being responsible for calling kfree after using them.
- * If the security module does not use security attributes or does
- * not wish to put a security attribute on this particular inode,
- * then it should return -EOPNOTSUPP to skip this processing.
- * @inode contains the inode structure of the newly created inode.
- * @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
- * @name will be set to the allocated name suffix (e.g. selinux).
- * @value will be set to the allocated attribute value.
- * @len will be set to the length of the value.
- * Returns 0 if @name and @value have been successfully set,
- * -EOPNOTSUPP if no security attribute is needed, or
- * -ENOMEM on memory allocation failure.
- * @inode_create:
- * Check permission to create a regular file.
- * @dir contains inode structure of the parent of the new file.
- * @dentry contains the dentry structure for the file to be created.
- * @mode contains the file mode of the file to be created.
- * Return 0 if permission is granted.
- * @inode_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link to the file.
- * @dir contains the inode structure of the parent directory of the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @path_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link
- * to the file.
- * @new_dir contains the path structure of the parent directory of
- * the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @inode_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the inode structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @path_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the path structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @inode_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the inode structure of parent directory of the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @path_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the path structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @inode_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with inode structure @dir.
- * @dir contains the inode structure of parent of the directory to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @path_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with path structure @path.
- * @dir contains the path structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @inode_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the inode structure of parent of the directory to be removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @path_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the path structure of parent of the directory to be
- * removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @inode_mknod:
- * Check permissions when creating a special file (or a socket or a fifo
- * file created via the mknod system call). Note that if mknod operation
- * is being done for a regular file, then the create hook will be called
- * and not this hook.
- * @dir contains the inode structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the device number.
- * Return 0 if permission is granted.
- * @path_mknod:
- * Check permissions when creating a file. Note that this hook is called
- * even if mknod operation is being done for a regular file.
- * @dir contains the path structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the undecoded device number. Use new_decode_dev() to get
- * the decoded device number.
- * Return 0 if permission is granted.
- * @inode_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the inode structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the inode structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the path structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the path structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_chmod:
- * Check for permission to change the DAC permissions of a file or directory.
- * @path contains the path structure of the file to change the mode of.
- * @mode contains the new DAC mode (permission bits).
- * Return 0 if permission is granted.
- * @path_chown:
- * Check for permission to change owner/group of a file or directory.
- * @path contains the path structure.
- * @uid contains new owner's ID.
- * @gid contains new group's ID.
- * Return 0 if permission is granted.
- * @path_chroot:
- * Check for permission to change root directory.
- * @path contains the path structure.
- * Return 0 if permission is granted.
- * @inode_readlink:
- * Check the permission to read the symbolic link.
- * @dentry contains the dentry structure for the file link.
- * Return 0 if permission is granted.
- * @inode_follow_link:
- * Check permission to follow a symbolic link when looking up a pathname.
- * @dentry contains the dentry structure for the link.
- * @nd contains the nameidata structure for the parent directory.
- * Return 0 if permission is granted.
- * @inode_permission:
- * Check permission before accessing an inode. This hook is called by the
- * existing Linux permission function, so a security module can use it to
- * provide additional checking for existing Linux permission checks.
- * Notice that this hook is called when a file is opened (as well as many
- * other operations), whereas the file_security_ops permission hook is
- * called when the actual read/write operations are performed.
- * @inode contains the inode structure to check.
- * @mask contains the permission mask.
- * Return 0 if permission is granted.
- * @inode_setattr:
- * Check permission before setting file attributes. Note that the kernel
- * call to notify_change is performed from several locations, whenever
- * file attributes change (such as when a file is truncated, chown/chmod
- * operations, transferring disk quotas, etc).
- * @dentry contains the dentry structure for the file.
- * @attr is the iattr structure containing the new file attributes.
- * Return 0 if permission is granted.
- * @path_truncate:
- * Check permission before truncating a file.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_getattr:
- * Check permission before obtaining file attributes.
- * @path contains the path structure for the file whose attributes are
- * requested.
- * Return 0 if permission is granted.
- * @inode_setxattr:
- * Check permission before setting the extended attributes
- * @value identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_post_setxattr:
- * Update inode security field after successful setxattr operation.
- * @value identified by @name for @dentry.
- * @inode_getxattr:
- * Check permission before obtaining the extended attributes
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_listxattr:
- * Check permission before obtaining the list of extended attribute
- * names for @dentry.
- * Return 0 if permission is granted.
- * @inode_removexattr:
- * Check permission before removing the extended attribute
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_getsecurity:
- * Retrieve a copy of the extended attribute representation of the
- * security label associated with @name for @inode via @buffer. Note that
- * @name is the remainder of the attribute name after the "security."
- * prefix has been removed. @alloc is used to specify whether the call
- * should return a value via the buffer or just the value length.
- * Return size of buffer on success.
- * @inode_setsecurity:
- * Set the security label associated with @name for @inode from the
- * extended attribute value @value. @size indicates the size of the
- * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- * Note that @name is the remainder of the attribute name after the
- * "security." prefix has been removed.
- * Return 0 on success.
- * @inode_listsecurity:
- * Copy the extended attribute names for the security labels
- * associated with @inode into @buffer. The maximum size of @buffer
- * is specified by @buffer_size. @buffer may be NULL to request
- * the size of the buffer required.
- * Returns number of bytes used/required on success.
- * @inode_need_killpriv:
- * Called when an inode has been changed.
- * @dentry is the dentry being changed.
- * Return <0 on error to abort the inode change operation.
- * Return 0 if inode_killpriv does not need to be called.
- * Return >0 if inode_killpriv does need to be called.
- * @inode_killpriv:
- * The setuid bit is being removed. Remove similar security labels.
- * Called with the dentry->d_inode->i_mutex held.
- * @dentry is the dentry being changed.
- * Return 0 on success. If error is returned, then the operation
- * causing setuid bit removal is failed.
- * @inode_getsecid:
- * Get the secid associated with the node.
- * @inode contains a pointer to the inode.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
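
To make the calling convention concrete, here is a minimal sketch of an
inode_permission implementation. The hook signature is taken from the
structure below; the function name and the policy (refusing write access to
character devices) are invented for illustration:

        #include <linux/fs.h>
        #include <linux/stat.h>

        static int example_inode_permission(struct inode *inode, int mask)
        {
                /* Hypothetical policy: deny write access to character devices. */
                if (S_ISCHR(inode->i_mode) && (mask & MAY_WRITE))
                        return -EACCES;
                return 0;       /* permission granted */
        }
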
- *
- * Security hooks for file operations
- *
- * @file_permission:
- * Check file permissions before accessing an open file. This hook is
- * called by various operations that read or write files. A security
- * module can use this hook to perform additional checking on these
- * operations, e.g. to revalidate permissions on use to support privilege
- * bracketing or policy changes. Notice that this hook is used when the
- * actual read/write operations are performed, whereas the
- * inode_security_ops hook is called when a file is opened (as well as
- * many other operations).
- * Caveat: Although this hook can be used to revalidate permissions for
- * various system call operations that read or write files, it does not
- * address the revalidation of permissions for memory-mapped files.
- * Security modules must handle this separately if they need such
- * revalidation.
- * @file contains the file structure being accessed.
- * @mask contains the requested permissions.
- * Return 0 if permission is granted.
- * @file_alloc_security:
- * Allocate and attach a security structure to the file->f_security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @file contains the file structure to secure.
- * Return 0 if the hook is successful and permission is granted.
- * @file_free_security:
- * Deallocate and free any security structures stored in file->f_security.
- * @file contains the file structure being modified.
- * @file_ioctl:
- * @file contains the file structure.
- * @cmd contains the operation to perform.
- * @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg
- * sometimes represents a user space pointer; in other cases, it may be a
- * simple integer value. When @arg represents a user space pointer, it
- * should never be used by the security module.
- * Return 0 if permission is granted.
- * @mmap_addr:
- * Check permissions for a mmap operation at @addr.
- * @addr contains virtual address that will be used for the operation.
- * Return 0 if permission is granted.
- * @mmap_file:
- * Check permissions for a mmap operation. The @file may be NULL, e.g.
- * if mapping anonymous memory.
- * @file contains the file structure for file to map (may be NULL).
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @file_mprotect:
- * Check permissions before changing memory access permissions.
- * @vma contains the memory region to modify.
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * Return 0 if permission is granted.
- * @file_lock:
- * Check permission before performing file locking operations.
- * Note: this hook mediates both flock and fcntl style locks.
- * @file contains the file structure.
- * @cmd contains the posix-translated lock operation to perform
- * (e.g. F_RDLCK, F_WRLCK).
- * Return 0 if permission is granted.
- * @file_fcntl:
- * Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg sometimes
- * represents a user space pointer; in other cases, it may be a simple
- * integer value. When @arg represents a user space pointer, it should
- * never be used by the security module.
- * @file contains the file structure.
- * @cmd contains the operation to be performed.
- * @arg contains the operational arguments.
- * Return 0 if permission is granted.
- * @file_set_fowner:
- * Save owner security information (typically from current->security) in
- * file->f_security for later use by the send_sigiotask hook.
- * @file contains the file structure to update.
- * No return value.
- * @file_send_sigiotask:
- * Check permission for the file owner @fown to send SIGIO or SIGURG to the
- * process @tsk. Note that this hook is sometimes called from interrupt.
- * Note that the fown_struct, @fown, is never outside the context of a
- * struct file, so the file structure (and associated security information)
- * can always be obtained:
- * container_of(fown, struct file, f_owner)
- * @tsk contains the structure of task receiving signal.
- * @fown contains the file owner information.
- * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
- * Return 0 if permission is granted.
- * @file_receive:
- * This hook allows security modules to control the ability of a process
- * to receive an open file descriptor via socket IPC.
- * @file contains the file structure being received.
- * Return 0 if permission is granted.
- * @file_open:
- * Save open-time permission checking state for later use upon
- * file_permission, and recheck access if anything has changed
- * since inode_permission.
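
As a hedged illustration of the file hooks, a file_ioctl implementation
might veto one command for unprivileged callers; only the signature comes
from the structure below, while the FIBMAP policy mirrors what the VFS
itself enforces and the function name is invented:

        #include <linux/fs.h>
        #include <linux/capability.h>

        static int example_file_ioctl(struct file *file, unsigned int cmd,
                                      unsigned long arg)
        {
                /* Hypothetical policy: FIBMAP only for CAP_SYS_RAWIO holders. */
                if (cmd == FIBMAP && !capable(CAP_SYS_RAWIO))
                        return -EPERM;
                return 0;
        }
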
- *
- * Security hooks for task operations.
- *
- * @task_create:
- * Check permission before creating a child process. See the clone(2)
- * manual page for definitions of the @clone_flags.
- * @clone_flags contains the flags indicating what should be shared.
- * Return 0 if permission is granted.
- * @task_free:
- * @task contains the task being freed.
- * Handle release of task-related resources. (Note that this can be called
- * from interrupt context.)
- * @cred_alloc_blank:
- * @cred points to the credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Only allocate sufficient memory and attach to @cred such that
- * cred_transfer() will not get ENOMEM.
- * @cred_free:
- * @cred points to the credentials.
- * Deallocate and clear the cred->security field in a set of credentials.
- * @cred_prepare:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Prepare a new set of credentials by copying the data from the old set.
- * @cred_transfer:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Transfer data from the original creds to the new creds.
- * @kernel_act_as:
- * Set the credentials for a kernel service to act as (subjective context).
- * @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
- * The current task must be the one that nominated @secid.
- * Return 0 if successful.
- * @kernel_create_files_as:
- * Set the file creation context in a set of credentials to be the same as
- * the objective context of the specified inode.
- * @new points to the credentials to be modified.
- * @inode points to the inode to use as a reference.
- * The current task must be the one that nominated @inode.
- * Return 0 if successful.
- * @kernel_fw_from_file:
- * Load firmware from userspace (not called for built-in firmware).
- * @file contains the file structure pointing to the file containing
- * the firmware to load. This argument will be NULL if the firmware
- * was loaded via the uevent-triggered blob-based interface exposed
- * by CONFIG_FW_LOADER_USER_HELPER.
- * @buf pointer to buffer containing firmware contents.
- * @size length of the firmware contents.
- * Return 0 if permission is granted.
- * @kernel_module_request:
- * Ability to trigger the kernel to automatically upcall to userspace for
- * userspace to load a kernel module with the given name.
- * @kmod_name contains the name of the module requested by the kernel.
- * Return 0 if successful.
- * @kernel_module_from_file:
- * Load a kernel module from userspace.
- * @file contains the file structure pointing to the file containing
- * the kernel module to load. If the module is being loaded from a blob,
- * this argument will be NULL.
- * Return 0 if permission is granted.
- * @task_fix_setuid:
- * Update the module's state after setting one or more of the user
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*uid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_setpgid:
- * Check permission before setting the process group identifier of the
- * process @p to @pgid.
- * @p contains the task_struct for process being modified.
- * @pgid contains the new pgid.
- * Return 0 if permission is granted.
- * @task_getpgid:
- * Check permission before getting the process group identifier of the
- * process @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsid:
- * Check permission before getting the session identifier of the process
- * @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsecid:
- * Retrieve the security identifier of the process @p.
- * @p contains the task_struct for the process whose secid is placed
- * into @secid.
- * In case of failure, @secid will be set to zero.
- * @task_setnice:
- * Check permission before setting the nice value of @p to @nice.
- * @p contains the task_struct of process.
- * @nice contains the new nice value.
- * Return 0 if permission is granted.
- * @task_setioprio:
- * Check permission before setting the ioprio value of @p to @ioprio.
- * @p contains the task_struct of process.
- * @ioprio contains the new ioprio value.
- * Return 0 if permission is granted.
- * @task_getioprio:
- * Check permission before getting the ioprio value of @p.
- * @p contains the task_struct of process.
- * Return 0 if permission is granted.
- * @task_setrlimit:
- * Check permission before setting the resource limits of the current
- * process for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (current->signal->rlim + resource).
- * @resource contains the resource whose limit is being set.
- * @new_rlim contains the new limits for @resource.
- * Return 0 if permission is granted.
- * @task_setscheduler:
- * Check permission before setting scheduling policy and/or parameters of
- * process @p based on @policy and @lp.
- * @p contains the task_struct for process.
- * @policy contains the scheduling policy.
- * @lp contains the scheduling parameters.
- * Return 0 if permission is granted.
- * @task_getscheduler:
- * Check permission before obtaining scheduling information for process
- * @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_movememory:
- * Check permission before moving memory owned by process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_kill:
- * Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
- * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
- * from the kernel and should typically be permitted.
- * SIGIO signals are handled separately by the send_sigiotask hook in
- * file_security_ops.
- * @p contains the task_struct for process.
- * @info contains the signal information.
- * @sig contains the signal value.
- * @secid contains the sid of the process where the signal originated.
- * Return 0 if permission is granted.
- * @task_wait:
- * Check permission before allowing a process to reap a child process @p
- * and collect its status information.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_prctl:
- * Check permission before performing a process control operation on the
- * current process.
- * @option contains the operation.
- * @arg2 contains an argument.
- * @arg3 contains an argument.
- * @arg4 contains an argument.
- * @arg5 contains an argument.
- * Return -ENOSYS if no-one wanted to handle this op, any other value to
- * cause prctl() to return immediately with that value.
- * @task_to_inode:
- * Set the security attributes for an inode based on an associated task's
- * security attributes, e.g. for /proc/pid inodes.
- * @p contains the task_struct for the task.
- * @inode contains the inode structure for the inode.
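
The task_prctl return convention above (-ENOSYS meaning "not handled") is
easy to get wrong, so a do-nothing sketch may help; the function name is
invented and the behaviour simply defers every operation to the kernel's
default logic:

        #include <linux/errno.h>

        static int example_task_prctl(int option, unsigned long arg2,
                                      unsigned long arg3, unsigned long arg4,
                                      unsigned long arg5)
        {
                /* Decline to handle any option; prctl() falls back to the
                 * normal capability-based checks. */
                return -ENOSYS;
        }
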
- *
- * Security hooks for Netlink messaging.
- *
- * @netlink_send:
- * Save security information for a netlink message so that permission
- * checking can be performed when the message is processed. The security
- * information can be saved using the eff_cap field of the
- * netlink_skb_parms structure. Also may be used to provide fine
- * grained control over message transmission.
- * @sk associated sock of task sending the message.
- * @skb contains the sk_buff structure for the netlink message.
- * Return 0 if the information was successfully saved and message
- * is allowed to be transmitted.
- *
- * Security hooks for Unix domain networking.
- *
- * @unix_stream_connect:
- * Check permissions before establishing a Unix domain stream connection
- * between @sock and @other.
- * @sock contains the sock structure.
- * @other contains the peer sock structure.
- * @newsk contains the new sock structure.
- * Return 0 if permission is granted.
- * @unix_may_send:
- * Check permissions before connecting or sending datagrams from @sock to
- * @other.
- * @sock contains the socket structure.
- * @other contains the peer socket structure.
- * Return 0 if permission is granted.
- *
- * The @unix_stream_connect and @unix_may_send hooks were necessary because
- * Linux provides an alternative to the conventional file name space for Unix
- * domain sockets. Whereas binding and connecting to sockets in the file name
- * space is mediated by the typical file permissions (and caught by the mknod
- * and permission hooks in inode_security_ops), binding and connecting to
- * sockets in the abstract name space is completely unmediated. Sufficient
- * control of Unix domain sockets in the abstract name space isn't possible
- * using only the socket layer hooks, since we need to know the actual target
- * socket, which is not looked up until we are inside the af_unix code.
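
As a sketch of how that extra visibility can be used, a unix_may_send
implementation could compare the owners of the two endpoints; SOCK_INODE()
and uid_eq() are real kernel helpers, while the same-owner policy and the
function name are invented:

        #include <net/sock.h>
        #include <linux/uidgid.h>

        static int example_unix_may_send(struct socket *sock,
                                         struct socket *other)
        {
                /* Hypothetical policy: datagrams may only flow between
                 * sockets whose backing inodes share an owner. */
                if (!uid_eq(SOCK_INODE(sock)->i_uid, SOCK_INODE(other)->i_uid))
                        return -EPERM;
                return 0;
        }
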
- *
- * Security hooks for socket operations.
- *
- * @socket_create:
- * Check permissions prior to creating a new socket.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * Return 0 if permission is granted.
- * @socket_post_create:
- * This hook allows a module to update or allocate a per-socket security
- * structure. Note that the security field was not added directly to the
- * socket structure, but rather, the socket security information is stored
- * in the associated inode. Typically, the inode alloc_security hook will
- * allocate and attach security information to
- * sock->inode->i_security. This hook may be used to update the
- * sock->inode->i_security field with additional information that wasn't
- * available when the inode was allocated.
- * @sock contains the newly created socket structure.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * @socket_bind:
- * Check permission before socket protocol layer bind operation is
- * performed and the socket @sock is bound to the address specified in the
- * @address parameter.
- * @sock contains the socket structure.
- * @address contains the address to bind to.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_connect:
- * Check permission before socket protocol layer connect operation
- * attempts to connect socket @sock to a remote address, @address.
- * @sock contains the socket structure.
- * @address contains the address of remote endpoint.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_listen:
- * Check permission before socket protocol layer listen operation.
- * @sock contains the socket structure.
- * @backlog contains the maximum length for the pending connection queue.
- * Return 0 if permission is granted.
- * @socket_accept:
- * Check permission before accepting a new connection. Note that the new
- * socket, @newsock, has been created and some information copied to it,
- * but the accept operation has not actually been performed.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
- * Return 0 if permission is granted.
- * @socket_sendmsg:
- * Check permission before transmitting a message to another socket.
- * @sock contains the socket structure.
- * @msg contains the message to be transmitted.
- * @size contains the size of message.
- * Return 0 if permission is granted.
- * @socket_recvmsg:
- * Check permission before receiving a message from a socket.
- * @sock contains the socket structure.
- * @msg contains the message structure.
- * @size contains the size of message structure.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @socket_getsockname:
- * Check permission before the local address (name) of the socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getpeername:
- * Check permission before the remote address (name) of a socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getsockopt:
- * Check permissions before retrieving the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to retrieve option from.
- * @optname contains the name of option to retrieve.
- * Return 0 if permission is granted.
- * @socket_setsockopt:
- * Check permissions before setting the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to set options for.
- * @optname contains the name of the option to set.
- * Return 0 if permission is granted.
- * @socket_shutdown:
- * Checks permission before all or part of a connection on the socket
- * @sock is shut down.
- * @sock contains the socket structure.
- * @how contains the flag indicating how future sends and receives are handled.
- * Return 0 if permission is granted.
- * @socket_sock_rcv_skb:
- * Check permissions on incoming network packets. This hook is distinct
- * from Netfilter's IP input hooks since it is the first time that the
- * incoming sk_buff @skb has been associated with a particular socket, @sk.
- * Must not sleep inside this hook because some callers hold spinlocks.
- * @sk contains the sock (not socket) associated with the incoming sk_buff.
- * @skb contains the incoming network data.
- * @socket_getpeersec_stream:
- * This hook allows the security module to provide peer socket security
- * state for unix or connected tcp sockets to userspace via the
- * SO_PEERSEC getsockopt. For tcp sockets this can be meaningful if the
- * socket is associated with an ipsec SA.
- * @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
- * of the security state.
- * @len as input is the maximum length to copy to userspace provided
- * by the caller.
- * Return 0 if all is well, otherwise, typical getsockopt return
- * values.
- * @socket_getpeersec_dgram:
- * This hook allows the security module to provide peer socket security
- * state for udp sockets on a per-packet basis to userspace via
- * the SO_PEERSEC getsockopt. The application must first have enabled
- * the IP_PASSSEC option via setsockopt. It can then retrieve the
- * security state returned by this hook for a packet via the SCM_SECURITY
- * ancillary message type.
- * @skb is the skbuff for the packet being queried
- * @secdata is a pointer to a buffer in which to copy the security data
- * @seclen is the maximum length for @secdata
- * Return 0 on success, error on failure.
- * @sk_alloc_security:
- * Allocate and attach a security structure to the sk->sk_security field,
- * which is used to copy security attributes between local stream sockets.
- * @sk_free_security:
- * Deallocate security structure.
- * @sk_clone_security:
- * Clone/copy security structure.
- * @sk_getsecid:
- * Retrieve the LSM-specific secid for the sock to enable caching of network
- * authorizations.
- * @sock_graft:
- * Sets the socket's isec sid to the sock's sid.
- * @inet_conn_request:
- * Sets the openreq's sid to the socket's sid, with the MLS portion taken
- * from the peer sid.
- * @inet_csk_clone:
- * Sets the new child socket's sid to the openreq sid.
- * @inet_conn_established:
- * Sets the connection's peersid to the secmark on skb.
- * @secmark_relabel_packet:
- * Check whether the process should be allowed to relabel packets to
- * the given secid.
- * @secmark_refcount_inc:
- * Tells the LSM to increment the number of secmark labeling rules loaded.
- * @secmark_refcount_dec:
- * Tells the LSM to decrement the number of secmark labeling rules loaded.
- * @req_classify_flow:
- * Sets the flow's sid to the openreq sid.
- * @tun_dev_alloc_security:
- * This hook allows a module to allocate a security structure for a TUN
- * device.
- * @security pointer to a security structure pointer.
- * Returns a zero on success, negative values on failure.
- * @tun_dev_free_security:
- * This hook allows a module to free the security structure for a TUN
- * device.
- * @security pointer to the TUN device's security structure
- * @tun_dev_create:
- * Check permissions prior to creating a new TUN device.
- * @tun_dev_attach_queue:
- * Check permissions prior to attaching to a TUN device queue.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_attach:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's sock structure.
- * @sk contains the existing sock structure.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_open:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's security structure.
- * @security pointer to the TUN device's security structure.
- * @skb_owned_by:
- * This hook sets the packet's owning sock.
- * @skb is the packet.
- * @sk the sock which owns the packet.
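
To illustrate the socket hooks, a socket_connect implementation might
filter by destination port; the signature matches the structure below,
while the outbound-SMTP policy and the function name are invented:

        #include <linux/in.h>
        #include <linux/socket.h>

        static int example_socket_connect(struct socket *sock,
                                          struct sockaddr *address, int addrlen)
        {
                struct sockaddr_in *sin = (struct sockaddr_in *)address;

                /* Hypothetical policy: block outbound IPv4 SMTP. */
                if (address->sa_family == AF_INET &&
                    addrlen >= sizeof(struct sockaddr_in) &&
                    ntohs(sin->sin_port) == 25)
                        return -EACCES;
                return 0;
        }
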
- *
- * Security hooks for XFRM operations.
- *
- * @xfrm_policy_alloc_security:
- * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
- * Database used by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level policy update program (e.g., setkey).
- * Allocate a security structure to the xp->security field; the security
- * field is initialized to NULL when the xfrm_policy is allocated.
- * @gfp is to specify the context for the allocation.
- * Return 0 if operation was successful (memory to allocate, legal context).
- * @xfrm_policy_clone_security:
- * @old_ctx contains an existing xfrm_sec_ctx.
- * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
- * Allocate a security structure in new_ctxp that contains the
- * information from the old_ctx structure.
- * Return 0 if operation was successful (memory to allocate).
- * @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
- * Deallocate xp->security.
- * @xfrm_policy_delete_security:
- * @ctx contains the xfrm_sec_ctx.
- * Authorize deletion of xp->security.
- * @xfrm_state_alloc:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to sec_ctx. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_alloc_acquire:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @polsec contains the policy's security context.
- * @secid contains the secid from which to take the mls portion of the
- * context.
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to secid. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_free_security:
- * @x contains the xfrm_state.
- * Deallocate x->security.
- * @xfrm_state_delete_security:
- * @x contains the xfrm_state.
- * Authorize deletion of x->security.
- * @xfrm_policy_lookup:
- * @ctx contains the xfrm_sec_ctx for which the access control is being
- * checked.
- * @fl_secid contains the flow security label that is used to authorize
- * access to the policy xp.
- * @dir contains the direction of the flow (input or output).
- * Check permission when a flow selects a xfrm_policy for processing
- * XFRMs on a packet. The hook is called when selecting either a
- * per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted, -ESRCH otherwise, or -errno
- * on other errors.
- * @xfrm_state_pol_flow_match:
- * @x contains the state to match.
- * @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
- * Return 1 if there is a match.
- * @xfrm_decode_session:
- * @skb points to skb to decode.
- * @secid points to the flow key secid to set.
- * @ckall says if all xfrms used should be checked for same secid.
- * Return 0 if ckall is zero or all xfrms used have the same secid.
- *
- * Security hooks affecting all Key Management operations
- *
- * @key_alloc:
- * Permit allocation of a key and assign security data. Note that key does
- * not have a serial number assigned at this point.
- * @key points to the key.
- * @flags contains the allocation flags.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_free:
- * Notification of destruction; free security data.
- * @key points to the key.
- * No return value.
- * @key_permission:
- * See whether a specific operational right is granted to a process on a
- * key.
- * @key_ref refers to the key (key pointer + possession attribute bit).
- * @cred points to the credentials to provide the context against which to
- * evaluate the security data on the key.
- * @perm describes the combination of permissions required of this key.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_getsecurity:
- * Get a textual representation of the security context attached to a key
- * for the purposes of honouring KEYCTL_GETSECURITY. This function
- * allocates the storage for the NUL-terminated string and the caller
- * should free it.
- * @key points to the key to be queried.
- * @_buffer points to a pointer that should be set to point to the
- * resulting string (unless no label is present or an error occurs).
- * Return the length of the string (including the terminating NUL) or
- * -ve on error.
- * May also return 0 (and a NULL buffer pointer) if there is no label.
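
A hedged sketch of key_permission, restricting one permission bit to root;
key_ref_t and KEY_NEED_SETATTR come from include/linux/key.h, while the
policy and the function name are invented:

        #include <linux/key.h>
        #include <linux/cred.h>

        static int example_key_permission(key_ref_t key_ref,
                                          const struct cred *cred,
                                          unsigned perm)
        {
                /* Hypothetical policy: only root may change key attributes. */
                if ((perm & KEY_NEED_SETATTR) &&
                    !uid_eq(cred->euid, GLOBAL_ROOT_UID))
                        return -EACCES;
                return 0;
        }
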
- *
- * Security hooks affecting all System V IPC operations.
- *
- * @ipc_permission:
- * Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
- * Return 0 if permission is granted.
- * @ipc_getsecid:
- * Get the secid associated with the ipc object.
- * @ipcp contains the kernel IPC permission structure.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for individual messages held in System V IPC message queues
- * @msg_msg_alloc_security:
- * Allocate and attach a security structure to the msg->security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @msg contains the message structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_msg_free_security:
- * Deallocate the security structure for this message.
- * @msg contains the message structure to be modified.
- *
- * Security hooks for System V IPC Message Queues
- *
- * @msg_queue_alloc_security:
- * Allocate and attach a security structure to the
- * msq->q_perm.security field. The security field is initialized to
- * NULL when the structure is first created.
- * @msq contains the message queue structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_queue_free_security:
- * Deallocate security structure for this message queue.
- * @msq contains the message queue structure to be modified.
- * @msg_queue_associate:
- * Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
- * message queue identifier for an existing message queue, not when a
- * new message queue is created.
- * @msq contains the message queue to act upon.
- * @msqflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgctl:
- * Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue @msq.
- * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @msq contains the message queue to act upon. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @msg_queue_msgsnd:
- * Check permission before a message, @msg, is enqueued on the message
- * queue, @msq.
- * @msq contains the message queue to send message to.
- * @msg contains the message to be enqueued.
- * @msqflg contains operational flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgrcv:
- * Check permission before a message, @msg, is removed from the message
- * queue, @msq. The @target task structure contains a pointer to the
- * process that will be receiving the message (not equal to the current
- * process when inline receives are being performed).
- * @msq contains the message queue to retrieve message from.
- * @msg contains the message destination.
- * @target contains the task structure for recipient process.
- * @type contains the type of message requested.
- * @mode contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Shared Memory Segments
- *
- * @shm_alloc_security:
- * Allocate and attach a security structure to the shp->shm_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @shp contains the shared memory structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @shm_free_security:
- * Deallocate the security struct for this memory segment.
- * @shp contains the shared memory structure to be modified.
- * @shm_associate:
- * Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
- * memory region identifier for an existing region, not when a new shared
- * memory region is created.
- * @shp contains the shared memory structure to be modified.
- * @shmflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @shm_shmctl:
- * Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region @shp.
- * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @shp contains shared memory structure to be modified.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @shm_shmat:
- * Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment @shp to the data segment of the calling process.
- * The attaching address is specified by @shmaddr.
- * @shp contains the shared memory structure to be modified.
- * @shmaddr contains the address to attach memory region to.
- * @shmflg contains the operational flags.
- * Return 0 if permission is granted.
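
For the shared-memory hooks, a shm_shmat sketch might key off @shmflg;
SHM_EXEC is the real attach flag from include/linux/shm.h, while the
no-executable-attach policy and the function name are invented:

        #include <linux/shm.h>

        static int example_shm_shmat(struct shmid_kernel *shp,
                                     char __user *shmaddr, int shmflg)
        {
                /* Hypothetical policy: refuse executable attachments. */
                if (shmflg & SHM_EXEC)
                        return -EACCES;
                return 0;
        }
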
- *
- * Security hooks for System V Semaphores
- *
- * @sem_alloc_security:
- * Allocate and attach a security structure to the sma->sem_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @sma contains the semaphore structure
- * Return 0 if operation was successful and permission is granted.
- * @sem_free_security:
- * deallocate security struct for this semaphore
- * @sma contains the semaphore structure.
- * @sem_associate:
- * Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
- * identifier for an existing semaphore, not when a new one must be
- * created.
- * @sma contains the semaphore structure.
- * @semflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @sem_semctl:
- * Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore @sma. The @sma may be NULL, e.g. for
- * IPC_INFO or SEM_INFO.
- * @sma contains the semaphore structure. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @sem_semop:
- * Check permissions before performing operations on members of the
- * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
- * may be modified.
- * @sma contains the semaphore structure.
- * @sops contains the operations to perform.
- * @nsops contains the number of operations to perform.
- * @alter contains the flag indicating whether changes are to be made.
- * Return 0 if permission is granted.
- *
- * @binder_set_context_mgr:
- * Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
- * Return 0 if permission is granted.
- * @binder_transaction:
- * Check whether @from is allowed to invoke a binder transaction call
- * to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_binder:
- * Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_file:
- * Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
- * @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
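
A minimal binder_transaction sketch, comparing task owners; task_uid() and
uid_eq() are real helpers, while the same-UID policy and the function name
are invented:

        #include <linux/cred.h>
        #include <linux/sched.h>

        static int example_binder_transaction(struct task_struct *from,
                                              struct task_struct *to)
        {
                /* Hypothetical policy: only allow transactions between
                 * tasks running under the same UID. */
                if (!uid_eq(task_uid(from), task_uid(to)))
                        return -EPERM;
                return 0;
        }
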
- *
- * @ptrace_access_check:
- * Check permission before allowing the current process to trace the
- * @child process.
- * Security modules may also want to perform a process tracing check
- * during an execve in the bprm_set_creds hook of binprm_security_ops
- * if the process is being traced and its security attributes would be
- * changed by the execve.
- * @child contains the task_struct structure for the target process.
- * @mode contains the PTRACE_MODE flags indicating the form of access.
- * Return 0 if permission is granted.
- * @ptrace_traceme:
- * Check that the @parent process has sufficient permission to trace the
- * current process before allowing the current process to present itself
- * to the @parent process for tracing.
- * @parent contains the task_struct structure for debugger process.
- * Return 0 if permission is granted.
- * @capget:
- * Get the @effective, @inheritable, and @permitted capability sets for
- * the @target process. The hook may also perform permission checking to
- * determine if the current process is allowed to see the capability sets
- * of the @target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if the capability sets were successfully obtained.
- * @capset:
- * Set the @effective, @inheritable, and @permitted capability sets for
- * the current process.
- * @new contains the new credentials structure for target process.
- * @old contains the current credentials structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 and update @new if permission is granted.
- * @capable:
- * Check whether a task with the given credentials has the @cap
- * capability in the user namespace @ns.
- * @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in.
- * @cap contains the capability (see include/linux/capability.h).
- * @audit indicates whether to write an audit message or not.
- * Return 0 if the capability is granted.
- * @syslog:
- * Check permission before accessing the kernel message ring or changing
- * logging to the console.
- * See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the type of action.
- * @from_file indicates the context of action (if it came from /proc).
- * Return 0 if permission is granted.
- * @settime:
- * Check permission to change the system time.
- * struct timespec and timezone are defined in include/linux/time.h.
- * @ts contains the new time.
- * @tz contains the new timezone.
- * Return 0 if permission is granted.
- * @vm_enough_memory:
- * Check permissions for allocating a new virtual mapping.
- * @mm contains the mm struct it is being added to.
- * @pages contains the number of pages.
- * Return 0 if permission is granted.
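
Because the capable hook replaces rather than augments the commoncap
logic, implementations usually finish by calling cap_capable(), the stock
capability check; the extra CAP_SYS_BOOT refusal here is an invented
policy and the function name is illustrative:

        #include <linux/security.h>

        static int example_capable(const struct cred *cred,
                                   struct user_namespace *ns, int cap, int audit)
        {
                /* Hypothetical policy: never grant reboot rights. */
                if (cap == CAP_SYS_BOOT)
                        return -EPERM;
                /* Otherwise defer to the standard capability logic. */
                return cap_capable(cred, ns, cap, audit);
        }
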
- *
- * @ismaclabel:
- * Check if the extended attribute specified by @name
- * represents a MAC label. Returns 1 if @name is a MAC
- * attribute, otherwise returns 0.
- * @name contains the full extended attribute name to check against
- * the LSM as a MAC label.
- *
- * @secid_to_secctx:
- * Convert secid to security context. If secdata is NULL the length of
- * the result will be returned in seclen, but no secdata will be returned.
- * This does mean that the length could change between calls to check the
- * length and the next call which actually allocates and returns the secdata.
- * @secid contains the security ID.
- * @secdata contains the pointer that stores the converted security context.
- * @seclen pointer which contains the length of the data.
- * @secctx_to_secid:
- * Convert security context to secid.
- * @secid contains the pointer to the generated security ID.
- * @secdata contains the security context.
- *
- * @release_secctx:
- * Release the security context.
- * @secdata contains the security context.
- * @seclen contains the length of the security context.
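
The secid_to_secctx/release_secctx pairing is most often consumed through
the security_secid_to_secctx() and security_release_secctx() wrappers from
this header; a usage sketch (the logging helper and its name are invented):

        #include <linux/security.h>
        #include <linux/printk.h>

        static void example_log_secid(u32 secid)
        {
                char *ctx;
                u32 len;

                if (security_secid_to_secctx(secid, &ctx, &len) == 0) {
                        pr_info("subject context: %.*s\n", (int)len, ctx);
                        security_release_secctx(ctx, len);
                }
        }
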
- *
- * Security hooks for Audit
- *
- * @audit_rule_init:
- * Allocate and initialize an LSM audit rule structure.
- * @field contains the required Audit action.
- * Field flags are defined in include/linux/audit.h.
- * @op contains the operator the rule uses.
- * @rulestr contains the context where the rule will be applied to.
- * @lsmrule contains a pointer to receive the result.
- * Return 0 if @lsmrule has been successfully set,
- * -EINVAL in case of an invalid rule.
- *
- * @audit_rule_known:
- * Specifies whether the given @rule contains any fields related to the
- * current LSM.
- * @rule contains the audit rule of interest.
- * Return 1 if relevant fields were found, 0 otherwise.
- *
- * @audit_rule_match:
- * Determine if given @secid matches a rule previously approved
- * by @audit_rule_known.
- * @secid contains the security id in question.
- * @field contains the field which relates to current LSM.
- * @op contains the operator that will be used for matching.
- * @rule points to the audit rule that will be checked against.
- * @actx points to the audit context associated with the check.
- * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
- *
- * @audit_rule_free:
- * Deallocate the LSM audit rule structure previously allocated by
- * audit_rule_init.
- * @rule contains the allocated rule.
- *
- * @inode_notifysecctx:
- * Notify the security module of what the security context of an inode
- * should be. Initializes the incore security context managed by the
- * security module for this inode. Example usage: NFS client invokes
- * this hook to initialize the security context in its incore inode to the
- * value provided by the server for the file when the server returned the
- * file's attributes to the client.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_setsecctx:
- * Change the security context of an inode. Updates the
- * incore security context managed by the security module and invokes the
- * fs code as needed (via __vfs_setxattr_noperm) to update any backing
- * xattrs that represent the context. Example usage: NFS server invokes
- * this hook to change the security context in its incore inode and on the
- * backing filesystem to a value provided by the client on a SETATTR
- * operation.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @dentry contains the dentry whose inode we wish to set the security
- * context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_getsecctx:
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
- *
- * @inode we wish to get the security context of.
- * @ctx is a pointer in which to place the allocated security context.
- * @ctxlen points to the place to put the length of @ctx.
- * This is the main security structure.
- */
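
A module of this era filled in a struct security_operations and registered
it during boot. The sketch below wires up hypothetical hooks named like the
examples above; security_module_enable(), register_security() and
security_initcall() are the real registration interfaces of this kernel
generation, everything named example_* is illustrative:

        static struct security_operations example_ops = {
                .name             = "example",
                .inode_permission = example_inode_permission,
                .task_prctl       = example_task_prctl,
        };

        static int __init example_lsm_init(void)
        {
                /* Only one "major" LSM may register; bail out quietly if
                 * another module was chosen on the command line. */
                if (!security_module_enable(&example_ops))
                        return 0;
                if (register_security(&example_ops))
                        panic("example: failed to register LSM\n");
                return 0;
        }
        security_initcall(example_lsm_init);
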
-struct security_operations {
- char name[SECURITY_NAME_MAX + 1];
-
- int (*binder_set_context_mgr) (struct task_struct *mgr);
- int (*binder_transaction) (struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_binder) (struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_file) (struct task_struct *from,
- struct task_struct *to, struct file *file);
-
- int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
- int (*ptrace_traceme) (struct task_struct *parent);
- int (*capget) (struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset) (struct cred *new,
- const struct cred *old,
- const kernel_cap_t *effective,
- const kernel_cap_t *inheritable,
- const kernel_cap_t *permitted);
- int (*capable) (const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
- int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
- int (*quota_on) (struct dentry *dentry);
- int (*syslog) (int type);
- int (*settime) (const struct timespec *ts, const struct timezone *tz);
- int (*vm_enough_memory) (struct mm_struct *mm, long pages);
-
- int (*bprm_set_creds) (struct linux_binprm *bprm);
- int (*bprm_check_security) (struct linux_binprm *bprm);
- int (*bprm_secureexec) (struct linux_binprm *bprm);
- void (*bprm_committing_creds) (struct linux_binprm *bprm);
- void (*bprm_committed_creds) (struct linux_binprm *bprm);
-
- int (*sb_alloc_security) (struct super_block *sb);
- void (*sb_free_security) (struct super_block *sb);
- int (*sb_copy_data) (char *orig, char *copy);
- int (*sb_remount) (struct super_block *sb, void *data);
- int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
- int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
- int (*sb_statfs) (struct dentry *dentry);
- int (*sb_mount) (const char *dev_name, struct path *path,
- const char *type, unsigned long flags, void *data);
- int (*sb_umount) (struct vfsmount *mnt, int flags);
- int (*sb_pivotroot) (struct path *old_path,
- struct path *new_path);
- int (*sb_set_mnt_opts) (struct super_block *sb,
- struct security_mnt_opts *opts,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_clone_mnt_opts) (const struct super_block *oldsb,
- struct super_block *newsb);
- int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
- int (*dentry_init_security) (struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
- u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink) (struct path *dir, struct dentry *dentry);
- int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode);
- int (*path_rmdir) (struct path *dir, struct dentry *dentry);
- int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode,
- unsigned int dev);
- int (*path_truncate) (struct path *path);
- int (*path_symlink) (struct path *dir, struct dentry *dentry,
- const char *old_name);
- int (*path_link) (struct dentry *old_dentry, struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry);
- int (*path_chmod) (struct path *path, umode_t mode);
- int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot) (struct path *path);
-#endif
-
- int (*inode_alloc_security) (struct inode *inode);
- void (*inode_free_security) (struct inode *inode);
- int (*inode_init_security) (struct inode *inode, struct inode *dir,
- const struct qstr *qstr, const char **name,
- void **value, size_t *len);
- int (*inode_create) (struct inode *dir,
- struct dentry *dentry, umode_t mode);
- int (*inode_link) (struct dentry *old_dentry,
- struct inode *dir, struct dentry *new_dentry);
- int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
- int (*inode_symlink) (struct inode *dir,
- struct dentry *dentry, const char *old_name);
- int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
- int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
- int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t dev);
- int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry);
- int (*inode_readlink) (struct dentry *dentry);
- int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
- int (*inode_permission) (struct inode *inode, int mask);
- int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr) (const struct path *path);
- int (*inode_setxattr) (struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- int (*inode_getxattr) (struct dentry *dentry, const char *name);
- int (*inode_listxattr) (struct dentry *dentry);
- int (*inode_removexattr) (struct dentry *dentry, const char *name);
- int (*inode_need_killpriv) (struct dentry *dentry);
- int (*inode_killpriv) (struct dentry *dentry);
- int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
- int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags);
- int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size);
- void (*inode_getsecid) (const struct inode *inode, u32 *secid);
-
- int (*file_permission) (struct file *file, int mask);
- int (*file_alloc_security) (struct file *file);
- void (*file_free_security) (struct file *file);
- int (*file_ioctl) (struct file *file, unsigned int cmd,
- unsigned long arg);
- int (*mmap_addr) (unsigned long addr);
- int (*mmap_file) (struct file *file,
- unsigned long reqprot, unsigned long prot,
- unsigned long flags);
- int (*file_mprotect) (struct vm_area_struct *vma,
- unsigned long reqprot,
- unsigned long prot);
- int (*file_lock) (struct file *file, unsigned int cmd);
- int (*file_fcntl) (struct file *file, unsigned int cmd,
- unsigned long arg);
- void (*file_set_fowner) (struct file *file);
- int (*file_send_sigiotask) (struct task_struct *tsk,
- struct fown_struct *fown, int sig);
- int (*file_receive) (struct file *file);
- int (*file_open) (struct file *file, const struct cred *cred);
-
- int (*task_create) (unsigned long clone_flags);
- void (*task_free) (struct task_struct *task);
- int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
- void (*cred_free) (struct cred *cred);
- int (*cred_prepare)(struct cred *new, const struct cred *old,
- gfp_t gfp);
- void (*cred_transfer)(struct cred *new, const struct cred *old);
- int (*kernel_act_as)(struct cred *new, u32 secid);
- int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
- int (*kernel_module_request)(char *kmod_name);
- int (*kernel_module_from_file)(struct file *file);
- int (*task_fix_setuid) (struct cred *new, const struct cred *old,
- int flags);
- int (*task_setpgid) (struct task_struct *p, pid_t pgid);
- int (*task_getpgid) (struct task_struct *p);
- int (*task_getsid) (struct task_struct *p);
- void (*task_getsecid) (struct task_struct *p, u32 *secid);
- int (*task_setnice) (struct task_struct *p, int nice);
- int (*task_setioprio) (struct task_struct *p, int ioprio);
- int (*task_getioprio) (struct task_struct *p);
- int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
- struct rlimit *new_rlim);
- int (*task_setscheduler) (struct task_struct *p);
- int (*task_getscheduler) (struct task_struct *p);
- int (*task_movememory) (struct task_struct *p);
- int (*task_kill) (struct task_struct *p,
- struct siginfo *info, int sig, u32 secid);
- int (*task_wait) (struct task_struct *p);
- int (*task_prctl) (int option, unsigned long arg2,
- unsigned long arg3, unsigned long arg4,
- unsigned long arg5);
- void (*task_to_inode) (struct task_struct *p, struct inode *inode);
-
- int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
- void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
-
- int (*msg_msg_alloc_security) (struct msg_msg *msg);
- void (*msg_msg_free_security) (struct msg_msg *msg);
-
- int (*msg_queue_alloc_security) (struct msg_queue *msq);
- void (*msg_queue_free_security) (struct msg_queue *msq);
- int (*msg_queue_associate) (struct msg_queue *msq, int msqflg);
- int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd);
- int (*msg_queue_msgsnd) (struct msg_queue *msq,
- struct msg_msg *msg, int msqflg);
- int (*msg_queue_msgrcv) (struct msg_queue *msq,
- struct msg_msg *msg,
- struct task_struct *target,
- long type, int mode);
-
- int (*shm_alloc_security) (struct shmid_kernel *shp);
- void (*shm_free_security) (struct shmid_kernel *shp);
- int (*shm_associate) (struct shmid_kernel *shp, int shmflg);
- int (*shm_shmctl) (struct shmid_kernel *shp, int cmd);
- int (*shm_shmat) (struct shmid_kernel *shp,
- char __user *shmaddr, int shmflg);
-
- int (*sem_alloc_security) (struct sem_array *sma);
- void (*sem_free_security) (struct sem_array *sma);
- int (*sem_associate) (struct sem_array *sma, int semflg);
- int (*sem_semctl) (struct sem_array *sma, int cmd);
- int (*sem_semop) (struct sem_array *sma,
- struct sembuf *sops, unsigned nsops, int alter);
-
- int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
-
- void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
-
- int (*getprocattr) (struct task_struct *p, char *name, char **value);
- int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size);
- int (*ismaclabel) (const char *name);
- int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen);
- int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
- void (*release_secctx) (char *secdata, u32 seclen);
-
- int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
- int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
- int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_NETWORK
- int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk);
- int (*unix_may_send) (struct socket *sock, struct socket *other);
-
- int (*socket_create) (int family, int type, int protocol, int kern);
- int (*socket_post_create) (struct socket *sock, int family,
- int type, int protocol, int kern);
- int (*socket_bind) (struct socket *sock,
- struct sockaddr *address, int addrlen);
- int (*socket_connect) (struct socket *sock,
- struct sockaddr *address, int addrlen);
- int (*socket_listen) (struct socket *sock, int backlog);
- int (*socket_accept) (struct socket *sock, struct socket *newsock);
- int (*socket_sendmsg) (struct socket *sock,
- struct msghdr *msg, int size);
- int (*socket_recvmsg) (struct socket *sock,
- struct msghdr *msg, int size, int flags);
- int (*socket_getsockname) (struct socket *sock);
- int (*socket_getpeername) (struct socket *sock);
- int (*socket_getsockopt) (struct socket *sock, int level, int optname);
- int (*socket_setsockopt) (struct socket *sock, int level, int optname);
- int (*socket_shutdown) (struct socket *sock, int how);
- int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb);
- int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
- int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
- void (*sk_free_security) (struct sock *sk);
- void (*sk_clone_security) (const struct sock *sk, struct sock *newsk);
- void (*sk_getsecid) (struct sock *sk, u32 *secid);
- void (*sock_graft) (struct sock *sk, struct socket *parent);
- int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb,
- struct request_sock *req);
- void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
- void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
- int (*secmark_relabel_packet) (u32 secid);
- void (*secmark_refcount_inc) (void);
- void (*secmark_refcount_dec) (void);
- void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
- int (*tun_dev_alloc_security) (void **security);
- void (*tun_dev_free_security) (void *security);
- int (*tun_dev_create) (void);
- int (*tun_dev_attach_queue) (void *security);
- int (*tun_dev_attach) (struct sock *sk, void *security);
- int (*tun_dev_open) (void *security);
-#endif /* CONFIG_SECURITY_NETWORK */
-
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
- int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
- void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc) (struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx);
- int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
- struct xfrm_sec_ctx *polsec,
- u32 secid);
- void (*xfrm_state_free_security) (struct xfrm_state *x);
- int (*xfrm_state_delete_security) (struct xfrm_state *x);
- int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
- int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
- struct xfrm_policy *xp,
- const struct flowi *fl);
- int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-
- /* key management security hooks */
-#ifdef CONFIG_KEYS
- int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
- void (*key_free) (struct key *key);
- int (*key_permission) (key_ref_t key_ref,
- const struct cred *cred,
- unsigned perm);
- int (*key_getsecurity)(struct key *key, char **_buffer);
-#endif /* CONFIG_KEYS */
-
-#ifdef CONFIG_AUDIT
- int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule);
- int (*audit_rule_known) (struct audit_krule *krule);
- int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule,
- struct audit_context *actx);
- void (*audit_rule_free) (void *lsmrule);
-#endif /* CONFIG_AUDIT */
-};
-
/* prototypes */
extern int security_init(void);
-extern int security_module_enable(struct security_operations *ops);
-extern int register_security(struct security_operations *ops);
-extern void __init security_fixup_ops(struct security_operations *ops);
-
/* Security operations */
int security_binder_set_context_mgr(struct task_struct *mgr);
@@ -1839,7 +256,8 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
int security_inode_readlink(struct dentry *dentry);
-int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ bool rcu);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(const struct path *path);
@@ -2047,7 +465,7 @@ static inline int security_settime(const struct timespec *ts,
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- return cap_vm_enough_memory(mm, pages);
+ return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}
static inline int security_bprm_set_creds(struct linux_binprm *bprm)
@@ -2242,7 +660,8 @@ static inline int security_inode_readlink(struct dentry *dentry)
}
static inline int security_inode_follow_link(struct dentry *dentry,
- struct nameidata *nd)
+ struct inode *inode,
+ bool rcu)
{
return 0;
}
@@ -2527,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg4,
unsigned long arg5)
{
- return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
@@ -2650,7 +1069,7 @@ static inline int security_setprocattr(struct task_struct *p, char *name, void *
static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
- return cap_netlink_send(sk, skb);
+ return 0;
}
static inline int security_ismaclabel(const char *name)
@@ -3218,36 +1637,5 @@ static inline void free_secdata(void *secdata)
{ }
#endif /* CONFIG_SECURITY */
-#ifdef CONFIG_SECURITY_YAMA
-extern int yama_ptrace_access_check(struct task_struct *child,
- unsigned int mode);
-extern int yama_ptrace_traceme(struct task_struct *parent);
-extern void yama_task_free(struct task_struct *task);
-extern int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
-#else
-static inline int yama_ptrace_access_check(struct task_struct *child,
- unsigned int mode)
-{
- return 0;
-}
-
-static inline int yama_ptrace_traceme(struct task_struct *parent)
-{
- return 0;
-}
-
-static inline void yama_task_free(struct task_struct *task)
-{
-}
-
-static inline int yama_task_prctl(int option, unsigned long arg2,
- unsigned long arg3, unsigned long arg4,
- unsigned long arg5)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_SECURITY_YAMA */
-
#endif /* ! __LINUX_SECURITY_H */
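The security_inode_follow_link() change above drops the nameidata argument in favour of the inode plus an rcu flag, so the hook can be invoked during RCU path walk. A minimal sketch of a caller under the new signature; walk_follow_link() is hypothetical, and -ECHILD is the usual RCU-walk "retry in ref-walk" signal:

/* Hypothetical caller of the reworked hook, not the actual fs/namei.c code. */
static int walk_follow_link(struct dentry *dentry, struct inode *inode,
			    bool rcu_walk)
{
	int error = security_inode_follow_link(dentry, inode, rcu_walk);

	if (error)
		return error;	/* -ECHILD in RCU walk: retry in ref-walk */
	/* ... go on to resolve the link body ... */
	return 0;
}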
diff --git a/kernel/include/linux/seq_file.h b/kernel/include/linux/seq_file.h
index 7848473a5..dde00defb 100644
--- a/kernel/include/linux/seq_file.h
+++ b/kernel/include/linux/seq_file.h
@@ -114,15 +114,25 @@ int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
-int seq_escape(struct seq_file *, const char *, const char *);
-int seq_putc(struct seq_file *m, char c);
-int seq_puts(struct seq_file *m, const char *s);
int seq_write(struct seq_file *seq, const void *data, size_t len);
-__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
-__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
+__printf(2, 0)
+void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
+__printf(2, 3)
+void seq_printf(struct seq_file *m, const char *fmt, ...);
+void seq_putc(struct seq_file *m, char c);
+void seq_puts(struct seq_file *m, const char *s);
+void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+ unsigned long long num);
+void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
+void seq_escape(struct seq_file *m, const char *s, const char *esc);
+
+void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, const void *buf, size_t len,
+ bool ascii);
int seq_path(struct seq_file *, const struct path *, const char *);
+int seq_file_path(struct seq_file *, struct file *, const char *);
int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
@@ -133,10 +143,6 @@ int single_release(struct inode *, struct file *);
void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
-int seq_put_decimal_ull(struct seq_file *m, char delimiter,
- unsigned long long num);
-int seq_put_decimal_ll(struct seq_file *m, char delimiter,
- long long num);
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
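With the hunk above, seq_printf() and friends return void, and helpers such as seq_hex_dump() join the API; overflow state is tracked inside the seq_file and handled by the core (see seq_has_overflowed()). A minimal sketch of a show() callback on the void-returning API; demo_show() and its data are made up for illustration:

/* Hypothetical show() callback; assumes <linux/seq_file.h> and
 * <linux/printk.h> (for DUMP_PREFIX_OFFSET).
 */
static int demo_show(struct seq_file *m, void *v)
{
	static const u8 raw[4] = { 0xde, 0xad, 0xbe, 0xef };

	seq_puts(m, "demo: ");
	seq_printf(m, "%zu bytes\n", sizeof(raw));
	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 16, 1, raw, sizeof(raw), true);
	return 0;	/* any overflow is dealt with by the seq_file core */
}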
diff --git a/kernel/include/linux/seqlock.h b/kernel/include/linux/seqlock.h
index eaade3619..b14f4d236 100644
--- a/kernel/include/linux/seqlock.h
+++ b/kernel/include/linux/seqlock.h
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
+#include <linux/compiler.h>
#include <asm/processor.h>
/*
@@ -243,9 +244,128 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
preempt_enable_rt();
}
-/*
+/**
+ * raw_write_seqcount_barrier - do a seq write barrier
+ * @s: pointer to seqcount_t
+ *
+ * This can be used to provide an ordering guarantee instead of the
+ * usual consistency guarantee. It is one wmb cheaper, because we can
+ * collapse the two back-to-back wmb()s.
+ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
+ *
+ * void read(void)
+ * {
+ * bool x, y;
+ *
+ * do {
+ * int s = read_seqcount_begin(&seq);
+ *
+ * x = X; y = Y;
+ *
+ * } while (read_seqcount_retry(&seq, s));
+ *
+ * BUG_ON(!x && !y);
+ * }
+ *
+ * void write(void)
+ * {
+ * Y = true;
+ *
+ * raw_write_seqcount_barrier(&seq);
+ *
+ * X = false;
+ * }
+ */
+static inline void raw_write_seqcount_barrier(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+ s->sequence++;
+}
+
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+ return lockless_dereference(s->sequence);
+}
+
+/**
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+ * interrupt the modification -- e.g. the concurrency is strictly between CPUs
+ * -- you most likely do not need this.
+ *
+ * Where the traditional RCU/lockless data structures rely on atomic
+ * modifications to ensure queries observe either the old or the new state the
+ * latch allows the same for non-atomic updates. The trade-off is doubling the
+ * cost of storage; we have to maintain two copies of the entire data
+ * structure.
+ *
+ * Very simply put: we first modify one copy and then the other. This ensures
+ * there is always one copy in a stable state, ready to give us an answer.
+ *
+ * The basic form is a data structure like:
+ *
+ * struct latch_struct {
+ * seqcount_t seq;
+ * struct data_struct data[2];
+ * };
+ *
+ * Where a modification, which is assumed to be externally serialized, does the
+ * following:
+ *
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ * smp_wmb(); <- Ensure that the last data[1] update is visible
+ * latch->seq++;
+ * smp_wmb(); <- Ensure that the seqcount update is visible
+ *
+ * modify(latch->data[0], ...);
+ *
+ * smp_wmb(); <- Ensure that the data[0] update is visible
+ * latch->seq++;
+ * smp_wmb(); <- Ensure that the seqcount update is visible
+ *
+ * modify(latch->data[1], ...);
+ * }
+ *
+ * The query will have a form like:
+ *
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ * struct entry *entry;
+ * unsigned seq, idx;
+ *
+ * do {
+ * seq = lockless_dereference(latch->seq);
+ *
+ * idx = seq & 0x01;
+ * entry = data_query(latch->data[idx], ...);
+ *
+ * smp_rmb();
+ * } while (seq != latch->seq);
+ *
+ * return entry;
+ * }
+ *
+ * So during the modification, queries are first redirected to data[1]. Then we
+ * modify data[0]. When that is complete, we redirect queries back to data[0]
+ * and we can modify data[1].
+ *
+ * NOTE: The non-requirement for atomic modifications does _NOT_ include
+ * the publishing of new entries in the case where data is a dynamic
+ * data structure.
+ *
+ * An iteration might start in data[0] and get suspended long enough
+ * to miss an entire modification sequence, once it resumes it might
+ * observe the new entry.
+ *
+ * NOTE: When data is a dynamic data structure, one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
*/
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
@@ -276,13 +396,13 @@ static inline void write_seqcount_end(seqcount_t *s)
}
/**
- * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
- * After write_seqcount_barrier, no read-side seq operations will complete
+ * After write_seqcount_invalidate, no read-side seq operations will complete
* successfully and see data older than this.
*/
-static inline void write_seqcount_barrier(seqcount_t *s)
+static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
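The rename above matches what the primitive actually does: it is not an ordering barrier, it forces in-flight read-side sections to retry. A minimal usage sketch (the shared data is hypothetical):

/* Sketch: after changing data that readers sample under
 * read_seqcount_begin()/read_seqcount_retry(), bump the count so any
 * reader that started before the update cannot complete successfully.
 */
static void demo_retire(seqcount_t *seq, int *shared)
{
	WRITE_ONCE(*shared, 0);			/* the modification */
	write_seqcount_invalidate(seq);		/* smp_wmb() + sequence += 2 */
}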
diff --git a/kernel/include/linux/serial_8250.h b/kernel/include/linux/serial_8250.h
index 78097e7a3..faa0e0370 100644
--- a/kernel/include/linux/serial_8250.h
+++ b/kernel/include/linux/serial_8250.h
@@ -12,6 +12,7 @@
#define _LINUX_SERIAL_8250_H
#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
#include <linux/platform_device.h>
/*
@@ -135,8 +136,8 @@ void serial8250_resume_port(int line);
extern int early_serial_setup(struct uart_port *port);
-extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
-extern void serial8250_early_out(struct uart_port *port, int offset, int value);
+extern int early_serial8250_setup(struct earlycon_device *device,
+ const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern int serial8250_do_startup(struct uart_port *port);
@@ -149,6 +150,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
+void serial8250_init_port(struct uart_8250_port *up);
+void serial8250_set_defaults(struct uart_8250_port *up);
+void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count);
+int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
diff --git a/kernel/include/linux/serial_core.h b/kernel/include/linux/serial_core.h
index 025dad9dc..297d4fa1c 100644
--- a/kernel/include/linux/serial_core.h
+++ b/kernel/include/linux/serial_core.h
@@ -35,7 +35,7 @@
#define uart_console(port) \
((port)->cons && (port)->cons->index == (port)->line)
#else
-#define uart_console(port) (0)
+#define uart_console(port) ({ (void)port; 0; })
#endif
struct uart_port;
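The statement-expression form above still evaluates to 0 when the console support is compiled out, but it now references its argument, so a variable used only as uart_console(port) no longer trips unused-variable warnings. Roughly (illustrative sketch; uart_state holding a uart_port pointer is assumed):

static void demo(struct uart_state *state)
{
	struct uart_port *port = state->uart_port;	/* only used below */

	/* With the old "(0)" expansion, "port" would be entirely
	 * unreferenced here when !CONFIG_SERIAL_CORE_CONSOLE and could
	 * warn; the statement expression keeps it "used".
	 */
	if (uart_console(port))
		pr_debug("port is a console\n");
}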
diff --git a/kernel/include/linux/serial_sci.h b/kernel/include/linux/serial_sci.h
index 6c5e3bb28..7c536ac5b 100644
--- a/kernel/include/linux/serial_sci.h
+++ b/kernel/include/linux/serial_sci.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_SERIAL_SCI_H
#define __LINUX_SERIAL_SCI_H
+#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/sh_dma.h>
@@ -10,59 +11,16 @@
#define SCIx_NOT_SUPPORTED (-1)
-/* SCSMR (Serial Mode Register) */
-#define SCSMR_CHR (1 << 6) /* 7-bit Character Length */
-#define SCSMR_PE (1 << 5) /* Parity Enable */
-#define SCSMR_ODD (1 << 4) /* Odd Parity */
-#define SCSMR_STOP (1 << 3) /* Stop Bit Length */
-#define SCSMR_CKS 0x0003 /* Clock Select */
-
/* Serial Control Register (@ = not supported by all parts) */
-#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */
-#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */
-#define SCSCR_TE (1 << 5) /* Transmit Enable */
-#define SCSCR_RE (1 << 4) /* Receive Enable */
-#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable @ */
-#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable @ */
-#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */
-#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */
-/* SCIFA/SCIFB only */
-#define SCSCR_TDRQE (1 << 15) /* Tx Data Transfer Request Enable */
-#define SCSCR_RDRQE (1 << 14) /* Rx Data Transfer Request Enable */
-
-/* SCxSR (Serial Status Register) on SCI */
-#define SCI_TDRE 0x80 /* Transmit Data Register Empty */
-#define SCI_RDRF 0x40 /* Receive Data Register Full */
-#define SCI_ORER 0x20 /* Overrun Error */
-#define SCI_FER 0x10 /* Framing Error */
-#define SCI_PER 0x08 /* Parity Error */
-#define SCI_TEND 0x04 /* Transmit End */
-
-#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
-
-/* SCxSR (Serial Status Register) on SCIF, HSCIF */
-#define SCIF_ER 0x0080 /* Receive Error */
-#define SCIF_TEND 0x0040 /* Transmission End */
-#define SCIF_TDFE 0x0020 /* Transmit FIFO Data Empty */
-#define SCIF_BRK 0x0010 /* Break Detect */
-#define SCIF_FER 0x0008 /* Framing Error */
-#define SCIF_PER 0x0004 /* Parity Error */
-#define SCIF_RDF 0x0002 /* Receive FIFO Data Full */
-#define SCIF_DR 0x0001 /* Receive Data Ready */
-
-#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
-
-/* SCFCR (FIFO Control Register) */
-#define SCFCR_LOOP (1 << 0) /* Loopback Test */
-
-/* SCSPTR (Serial Port Register), optional */
-#define SCSPTR_RTSIO (1 << 7) /* Serial Port RTS Pin Input/Output */
-#define SCSPTR_CTSIO (1 << 5) /* Serial Port CTS Pin Input/Output */
-#define SCSPTR_SPB2IO (1 << 1) /* Serial Port Break Input/Output */
-#define SCSPTR_SPB2DT (1 << 0) /* Serial Port Break Data */
-
-/* HSSRR HSCIF */
-#define HSCIF_SRE 0x8000 /* Sampling Rate Register Enable */
+#define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */
+#define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */
+#define SCSCR_TE BIT(5) /* Transmit Enable */
+#define SCSCR_RE BIT(4) /* Receive Enable */
+#define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable @ */
+#define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable @ */
+#define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */
+#define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */
+
enum {
SCIx_PROBE_REGTYPE,
@@ -82,28 +40,6 @@ enum {
SCIx_NR_REGTYPES,
};
-/*
- * SCI register subset common for all port types.
- * Not all registers will exist on all parts.
- */
-enum {
- SCSMR, /* Serial Mode Register */
- SCBRR, /* Bit Rate Register */
- SCSCR, /* Serial Control Register */
- SCxSR, /* Serial Status Register */
- SCFCR, /* FIFO Control Register */
- SCFDR, /* FIFO Data Count Register */
- SCxTDR, /* Transmit (FIFO) Data Register */
- SCxRDR, /* Receive (FIFO) Data Register */
- SCLSR, /* Line Status Register */
- SCTFDR, /* Transmit FIFO Data Count Register */
- SCRFDR, /* Receive FIFO Data Count Register */
- SCSPTR, /* Serial Port Register */
- HSSRR, /* Sampling Rate Register */
-
- SCIx_NR_REGS,
-};
-
struct device;
struct plat_sci_port_ops {
@@ -113,7 +49,7 @@ struct plat_sci_port_ops {
/*
* Port-specific capabilities
*/
-#define SCIx_HAVE_RTSCTS (1 << 0)
+#define SCIx_HAVE_RTSCTS BIT(0)
/*
* Platform device specific platform_data struct
diff --git a/kernel/include/linux/serio.h b/kernel/include/linux/serio.h
index 9f779c7a2..df4ab5de1 100644
--- a/kernel/include/linux/serio.h
+++ b/kernel/include/linux/serio.h
@@ -18,6 +18,8 @@
#include <linux/mod_devicetable.h>
#include <uapi/linux/serio.h>
+extern struct bus_type serio_bus;
+
struct serio {
void *port_data;
diff --git a/kernel/include/linux/shdma-base.h b/kernel/include/linux/shdma-base.h
index dd0ba502c..d927647e6 100644
--- a/kernel/include/linux/shdma-base.h
+++ b/kernel/include/linux/shdma-base.h
@@ -128,7 +128,10 @@ void shdma_cleanup(struct shdma_dev *sdev);
#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
-#define shdma_chan_filter NULL
+static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ return false;
+}
#endif
#endif
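Replacing the NULL define with a typed stub keeps shdma_chan_filter usable as a dma_filter_fn in every configuration, so callers can reference it without #ifdefs. A minimal sketch; the slave data is whatever the platform passes, shown here as an opaque pointer:

static struct dma_chan *demo_request(void *slave_data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* When CONFIG_SH_DMAE_BASE is off, the stub simply matches no
	 * channel instead of leaving a bare NULL in the expression.
	 */
	return dma_request_channel(mask, shdma_chan_filter, slave_data);
}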
diff --git a/kernel/include/linux/shmem_fs.h b/kernel/include/linux/shmem_fs.h
index 50777b5b1..92d112aee 100644
--- a/kernel/include/linux/shmem_fs.h
+++ b/kernel/include/linux/shmem_fs.h
@@ -15,10 +15,7 @@ struct shmem_inode_info {
unsigned int seals; /* shmem seals */
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
- union {
- unsigned long swapped; /* subtotal assigned to swap */
- char *symlink; /* unswappable short symlink */
- };
+ unsigned long swapped; /* subtotal assigned to swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct list_head swaplist; /* chain of maybes on swap */
struct simple_xattrs xattrs; /* list of xattrs */
diff --git a/kernel/include/linux/signal.h b/kernel/include/linux/signal.h
index 66215b20a..561a1283e 100644
--- a/kernel/include/linux/signal.h
+++ b/kernel/include/linux/signal.h
@@ -240,7 +240,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
struct sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/kernel/include/linux/skbuff.h b/kernel/include/linux/skbuff.h
index b42539122..71a6f5b04 100644
--- a/kernel/include/linux/skbuff.h
+++ b/kernel/include/linux/skbuff.h
@@ -34,7 +34,10 @@
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
+#include <linux/splice.h>
+#include <linux/in6.h>
+#include <net/flow.h>
/* A. Checksumming of received packets by device.
*
@@ -170,13 +173,26 @@ struct nf_bridge_info {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
- } orig_proto;
- bool pkt_otherhost;
- unsigned int mask;
+ } orig_proto:8;
+ u8 pkt_otherhost:1;
+ u8 in_prerouting:1;
+ u8 bridged_dnat:1;
+ __u16 frag_max_size;
struct net_device *physindev;
+
+ /* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
- char neigh_header[8];
- __be32 ipv4_daddr;
+ union {
+ /* prerouting: detect dnat in orig/reply direction */
+ __be32 ipv4_daddr;
+ struct in6_addr ipv6_daddr;
+
+ /* after prerouting + nat detected: store original source
+ * mac since neigh resolution overwrites it, only used while
+ * skb is out in neigh layer.
+ */
+ char neigh_header[8];
+ };
};
#endif
@@ -204,6 +220,7 @@ struct sk_buff;
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
+extern int sysctl_max_skb_frags;
typedef struct skb_frag_struct skb_frag_t;
@@ -448,6 +465,15 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
return delta_us;
}
+static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
+ const struct skb_mstamp *t0)
+{
+ s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
+
+ if (!diff)
+ diff = t1->stamp_us - t0->stamp_us;
+ return diff > 0;
+}
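skb_mstamp_after() relies on the classic signed-difference idiom: subtract two free-running unsigned counters and test the sign of the result, which stays correct across wraparound. The same trick in a standalone C sketch:

#include <stdint.h>
#include <stdio.h>

/* Same comparison as skb_mstamp_after(), in plain C. */
static int after(uint32_t t1, uint32_t t0)
{
	return (int32_t)(t1 - t0) > 0;
}

int main(void)
{
	/* Prints 1: 5 is "after" 0xfffffff0 even though the counter wrapped. */
	printf("%d\n", after(5u, 0xfffffff0u));
	return 0;
}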
/**
* struct sk_buff - socket buffer
@@ -499,6 +525,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
+ * @offload_fwd_mark: forwarding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -643,9 +670,15 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
+ __u32 secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ __u32 offload_fwd_mark;
#endif
+ };
+
union {
__u32 mark;
__u32 reserved_tailroom;
@@ -860,6 +893,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int len, int odd, struct sk_buff *skb),
void *from, int length);
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+ int offset, size_t size);
+
struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
@@ -912,15 +948,90 @@ enum pkt_hash_types {
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+static inline void skb_clear_hash(struct sk_buff *skb)
{
- skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+ skb->hash = 0;
skb->sw_hash = 0;
+ skb->l4_hash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+ if (!skb->l4_hash)
+ skb_clear_hash(skb);
+}
+
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+ skb->l4_hash = is_l4;
+ skb->sw_hash = is_sw;
skb->hash = hash;
}
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+ /* Used by drivers to set hash from HW */
+ __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+ __skb_set_hash(skb, hash, true, is_l4);
+}
+
void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen,
+ unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, unsigned int flags)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen, flags);
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -929,22 +1040,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
+
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
return skb->hash;
}
-static inline void skb_clear_hash(struct sk_buff *skb)
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
- skb->hash = 0;
- skb->sw_hash = 0;
- skb->l4_hash = 0;
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
+ return skb->hash;
}
-static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
- if (!skb->l4_hash)
- skb_clear_hash(skb);
+ return skb->hash;
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
@@ -1098,7 +1226,7 @@ static inline int skb_cloned(const struct sk_buff *skb)
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
- might_sleep_if(pri & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);
@@ -1182,7 +1310,7 @@ static inline int skb_shared(const struct sk_buff *skb)
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
- might_sleep_if(pri & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
@@ -1218,7 +1346,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
gfp_t pri)
{
- might_sleep_if(pri & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
@@ -1938,8 +2066,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect(skb, &keys))
- skb_set_transport_header(skb, keys.thoff);
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+ skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
}
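skb_probe_transport_header() above shows the new calling convention: dissected fields now live in structured keys (keys.control.thoff rather than the old flat keys.thoff). A minimal sketch of a direct user; demo_dissect() is hypothetical and the field names are assumed from this kernel revision:

static void demo_dissect(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return;		/* dissection failed, nothing usable */
	pr_debug("l4 offset %u, proto 0x%04x\n",
		 keys.control.thoff, ntohs(keys.basic.n_proto));
}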
@@ -2130,10 +2258,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
-
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2188,6 +2312,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+static inline void skb_free_frag(void *addr)
+{
+ __free_page_frag(addr);
+}
+
void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
@@ -2595,6 +2724,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
@@ -2661,12 +2793,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
skb_shinfo(skb)->frag_list = NULL;
}
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
- frag->next = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = frag;
-}
-
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
@@ -2695,9 +2821,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ssize_t skb_socket_splice(struct sock *sk,
+ struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
- unsigned int flags);
+ unsigned int flags,
+ ssize_t (*splice_cb)(struct sock *,
+ struct pipe_inode_info *,
+ struct splice_pipe_desc *));
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2732,8 +2864,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *data, int hlen, void *buffer)
+static inline void * __must_check
+__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
{
if (hlen - offset >= len)
return data + offset;
@@ -2745,8 +2878,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
return buffer;
}
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *buffer)
+static inline void * __must_check
+skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
skb_headlen(skb), buffer);
@@ -2867,11 +3000,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
*
* PHY drivers may accept clones of transmitted packets for
* timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
*
* @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
@@ -3053,7 +3186,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
}
} else if (skb->csum_bad) {
/* ip_summed == CHECKSUM_NONE in this case */
- return 1;
+ return (__force __sum16)1;
}
skb->csum = psum;
@@ -3301,9 +3434,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues);
-
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
@@ -3324,7 +3454,8 @@ struct skb_gso_cb {
int encap_level;
__u16 csum_start;
};
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
+#define SKB_SGO_CB_OFFSET 32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
@@ -3358,15 +3489,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __u16 csum;
+ skb_transport_offset(skb);
+ __wsum partial;
- csum = csum_fold(csum_partial(skb_transport_header(skb),
- plen, skb->csum));
+ partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
skb->csum = res;
SKB_GSO_CB(skb)->csum_start -= plen;
- return csum;
+ return csum_fold(partial);
}
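The rework above keeps the running checksum in 32-bit __wsum form and folds to 16 bits only once, at the end, so full precision is retained while the sum is still being accumulated. What csum_fold() computes, as a standalone sketch:

#include <stdint.h>

/* One's-complement fold of a 32-bit accumulator to a 16-bit checksum. */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
	return (uint16_t)~sum;
}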
static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3421,10 +3551,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+ unsigned int transport_len,
+ __sum16(*skb_chkf)(struct sk_buff *skb));
/**
* skb_head_is_locked - Determine if the skb->head is locked down
@@ -3456,5 +3585,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
skb_network_header(skb);
return hdr_len + skb_gso_transport_seglen(skb);
}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
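The __must_check annotations added to skb_header_pointer() earlier in this file exist because the return value may be NULL (the requested header lies beyond the available data) or may point into the caller's copy buffer rather than into the skb itself. Typical careful usage, sketched with a hypothetical parser:

static int demo_dst_port(const struct sk_buff *skb, int offset)
{
	struct udphdr _uh;			/* copy buffer, in case the
						 * header is non-linear */
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;			/* header not present */
	return ntohs(uh->dest);
}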
diff --git a/kernel/include/linux/slab.h b/kernel/include/linux/slab.h
index ffd24c830..2037a861e 100644
--- a/kernel/include/linux/slab.h
+++ b/kernel/include/linux/slab.h
@@ -111,7 +111,7 @@ struct mem_cgroup;
* struct kmem_cache related prototypes
*/
void __init kmem_cache_init(void);
-int slab_is_available(void);
+bool slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
@@ -158,6 +158,24 @@ size_t ksize(const void *);
#endif
/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
* Kmalloc array related definitions
*/
@@ -240,8 +258,8 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
* belongs to.
* 0 = zero alloc
* 1 = 65 .. 96 bytes
- * 2 = 120 .. 192 bytes
- * n = 2^(n-1) .. 2^n -1
+ * 2 = 129 .. 192 bytes
+ * n = 2^(n-1)+1 .. 2^n
*/
static __always_inline int kmalloc_index(size_t size)
{
@@ -286,13 +304,23 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);
+/*
+ * Bulk allocation and freeing operations. These are accellerated in an
+ * allocator specific way to avoid taking locks repeatedly or building
+ * metadata structures unnecessarily.
+ *
+ * Note that interrupts must be enabled when calling these functions.
+ */
+void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
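A minimal sketch of the bulk API above; this assumes the int return reports how many objects were actually placed in the array (zero on failure), which is this revision's semantics:

static void demo_bulk(struct kmem_cache *cachep)
{
	void *objs[16];
	int got;

	got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!got)
		return;
	/* ... use objs[0] .. objs[got - 1] ... */
	kmem_cache_free_bulk(cachep, got, objs);
}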
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -306,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size);
+ int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -344,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -472,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
struct memcg_cache_array {
struct rcu_head rcu;
struct kmem_cache *entries[0];
diff --git a/kernel/include/linux/smpboot.h b/kernel/include/linux/smpboot.h
index d600afb21..12910cf19 100644
--- a/kernel/include/linux/smpboot.h
+++ b/kernel/include/linux/smpboot.h
@@ -24,9 +24,8 @@ struct smpboot_thread_data;
* parked (cpu offline)
* @unpark: Optional unpark function, called when the thread is
* unparked (cpu online)
- * @pre_unpark: Optional unpark function, called before the thread is
- * unparked (cpu online). This is not guaranteed to be
- * called on the target cpu of the thread. Careful!
+ * @cpumask: Internal state. To update which threads are unparked,
+ * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -40,12 +39,23 @@ struct smp_hotplug_thread {
void (*cleanup)(unsigned int cpu, bool online);
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
- void (*pre_unpark)(unsigned int cpu);
+ cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *cpumask);
+
+static inline int
+smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+ return smpboot_register_percpu_thread_cpumask(plug_thread,
+ cpu_possible_mask);
+}
+
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *);
#endif
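With the cpumask now internal state, registration and later affinity changes go through the functions above. A rough sketch; my_should_run()/my_fn() and housekeeping_mask are hypothetical, and thread_should_run/thread_fn are smp_hotplug_thread fields not shown in this hunk:

static DEFINE_PER_CPU(struct task_struct *, demo_tasks);

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_tasks,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	int ret = smpboot_register_percpu_thread(&demo_threads);

	if (ret)
		return ret;
	/* Later, restrict the threads to a subset of CPUs. */
	return smpboot_update_cpumask_percpu_thread(&demo_threads,
						    housekeeping_mask);
}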
diff --git a/kernel/include/linux/soc/brcmstb/brcmstb.h b/kernel/include/linux/soc/brcmstb/brcmstb.h
new file mode 100644
index 000000000..337ce414e
--- /dev/null
+++ b/kernel/include/linux/soc/brcmstb/brcmstb.h
@@ -0,0 +1,10 @@
+#ifndef __BRCMSTB_SOC_H
+#define __BRCMSTB_SOC_H
+
+/*
+ * Bus Interface Unit control register setup, must happen early during boot,
+ * before SMP is brought up, called by machine entry point.
+ */
+void brcmstb_biuctrl_init(void);
+
+#endif /* __BRCMSTB_SOC_H */
diff --git a/kernel/include/linux/soc/dove/pmu.h b/kernel/include/linux/soc/dove/pmu.h
new file mode 100644
index 000000000..9c99f84bc
--- /dev/null
+++ b/kernel/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
+#ifndef LINUX_SOC_DOVE_PMU_H
+#define LINUX_SOC_DOVE_PMU_H
+
+int dove_init_pmu(void);
+
+#endif
diff --git a/kernel/include/linux/soc/mediatek/infracfg.h b/kernel/include/linux/soc/mediatek/infracfg.h
new file mode 100644
index 000000000..a5714e93f
--- /dev/null
+++ b/kernel/include/linux/soc/mediatek/infracfg.h
@@ -0,0 +1,26 @@
+#ifndef __SOC_MEDIATEK_INFRACFG_H
+#define __SOC_MEDIATEK_INFRACFG_H
+
+#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
+#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
+#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
+#define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6)
+#define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12)
+#define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13)
+#define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16)
+#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17)
+#define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18)
+#define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19)
+#define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
+#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
+
+#endif /* __SOC_MEDIATEK_INFRACFG_H */
diff --git a/kernel/include/linux/soc/qcom/smd-rpm.h b/kernel/include/linux/soc/qcom/smd-rpm.h
new file mode 100644
index 000000000..2a53dcaee
--- /dev/null
+++ b/kernel/include/linux/soc/qcom/smd-rpm.h
@@ -0,0 +1,35 @@
+#ifndef __QCOM_SMD_RPM_H__
+#define __QCOM_SMD_RPM_H__
+
+struct qcom_smd_rpm;
+
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+
+/*
+ * Constants used for addressing resources in the RPM.
+ */
+#define QCOM_SMD_RPM_BOOST 0x61747362
+#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
+#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
+#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362
+#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
+#define QCOM_SMD_RPM_LDOA 0x616f646c
+#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
+#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
+#define QCOM_SMD_RPM_NCPA 0x6170636E
+#define QCOM_SMD_RPM_NCPB 0x6270636E
+#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
+#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_SMPA 0x61706d73
+#define QCOM_SMD_RPM_SMPB 0x62706d73
+#define QCOM_SMD_RPM_SPDM 0x63707362
+#define QCOM_SMD_RPM_VSA 0x00617376
+
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 resource_type, u32 resource_id,
+ void *buf, size_t count);
+
+#endif
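A sketch of the write path above. The key/length/value payload layout is resource-specific; both the layout and the parameter key shown here are illustrative assumptions, not part of this header:

static int demo_vote(struct qcom_smd_rpm *rpm)
{
	struct {
		__le32 key;	/* hypothetical parameter key */
		__le32 nbytes;
		__le32 value;
	} req = {
		.key	= cpu_to_le32(0x6e657773),	/* assumed enable key */
		.nbytes	= cpu_to_le32(sizeof(u32)),
		.value	= cpu_to_le32(1),
	};

	return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				  QCOM_SMD_RPM_SMPA, 1, &req, sizeof(req));
}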
diff --git a/kernel/include/linux/soc/qcom/smd.h b/kernel/include/linux/soc/qcom/smd.h
new file mode 100644
index 000000000..d0cb6d189
--- /dev/null
+++ b/kernel/include/linux/soc/qcom/smd.h
@@ -0,0 +1,57 @@
+#ifndef __QCOM_SMD_H__
+#define __QCOM_SMD_H__
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct qcom_smd;
+struct qcom_smd_channel;
+struct qcom_smd_lookup;
+
+/**
+ * struct qcom_smd_id - struct used for matching a smd device
+ * @name: name of the channel
+ */
+struct qcom_smd_id {
+ char name[20];
+};
+
+/**
+ * struct qcom_smd_device - smd device struct
+ * @dev: the device struct
+ * @channel: handle to the smd channel for this device
+ */
+struct qcom_smd_device {
+ struct device dev;
+ struct qcom_smd_channel *channel;
+};
+
+/**
+ * struct qcom_smd_driver - smd driver struct
+ * @driver: underlying device driver
+ * @smd_match_table: static channel match table
+ * @probe: invoked when the smd channel is found
+ * @remove: invoked when the smd channel is closed
+ * @callback: invoked when an inbound message is received on the channel,
+ * should return 0 on success or -EBUSY if the data cannot be
+ * consumed at this time
+ */
+struct qcom_smd_driver {
+ struct device_driver driver;
+ const struct qcom_smd_id *smd_match_table;
+
+ int (*probe)(struct qcom_smd_device *dev);
+ void (*remove)(struct qcom_smd_device *dev);
+ int (*callback)(struct qcom_smd_device *, const void *, size_t);
+};
+
+int qcom_smd_driver_register(struct qcom_smd_driver *drv);
+void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+
+#define module_qcom_smd_driver(__smd_driver) \
+ module_driver(__smd_driver, qcom_smd_driver_register, \
+ qcom_smd_driver_unregister)
+
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+#endif
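Registering against this bus follows the usual driver pattern; a minimal sketch (the driver name and matched channel are illustrative):

static int demo_probe(struct qcom_smd_device *sdev)
{
	return 0;
}

static int demo_callback(struct qcom_smd_device *sdev,
			 const void *data, size_t count)
{
	/* Return -EBUSY here if the message cannot be consumed yet. */
	return 0;
}

static const struct qcom_smd_id demo_smd_match[] = {
	{ .name = "rpm_requests" },
	{}
};

static struct qcom_smd_driver demo_driver = {
	.probe		= demo_probe,
	.callback	= demo_callback,
	.smd_match_table = demo_smd_match,
	.driver	= {
		.name	= "demo_smd",
	},
};
module_qcom_smd_driver(demo_driver);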
diff --git a/kernel/include/linux/soc/qcom/smem.h b/kernel/include/linux/soc/qcom/smem.h
new file mode 100644
index 000000000..785e196ee
--- /dev/null
+++ b/kernel/include/linux/soc/qcom/smem.h
@@ -0,0 +1,11 @@
+#ifndef __QCOM_SMEM_H__
+#define __QCOM_SMEM_H__
+
+#define QCOM_SMEM_HOST_ANY -1
+
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
+
+int qcom_smem_get_free_space(unsigned host);
+
+#endif
diff --git a/kernel/include/linux/soc/sunxi/sunxi_sram.h b/kernel/include/linux/soc/sunxi/sunxi_sram.h
new file mode 100644
index 000000000..c5f663bba
--- /dev/null
+++ b/kernel/include/linux/soc/sunxi/sunxi_sram.h
@@ -0,0 +1,19 @@
+/*
+ * Allwinner SoCs SRAM Controller Driver
+ *
+ * Copyright (C) 2015 Maxime Ripard
+ *
+ * Author: Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _SUNXI_SRAM_H_
+#define _SUNXI_SRAM_H_
+
+int sunxi_sram_claim(struct device *dev);
+int sunxi_sram_release(struct device *dev);
+
+#endif /* _SUNXI_SRAM_H_ */
diff --git a/kernel/include/linux/sock_diag.h b/kernel/include/linux/sock_diag.h
index 083ac3880..fddebc617 100644
--- a/kernel/include/linux/sock_diag.h
+++ b/kernel/include/linux/sock_diag.h
@@ -1,7 +1,10 @@
#ifndef __SOCK_DIAG_H__
#define __SOCK_DIAG_H__
+#include <linux/netlink.h>
#include <linux/user_namespace.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
struct sk_buff;
@@ -11,6 +14,7 @@ struct sock;
struct sock_diag_handler {
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+ int (*get_info)(struct sk_buff *skb, struct sock *sk);
};
int sock_diag_register(const struct sock_diag_handler *h);
@@ -26,4 +30,42 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
struct sk_buff *skb, int attrtype);
+static inline
+enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
+{
+ switch (sk->sk_family) {
+ case AF_INET:
+ switch (sk->sk_protocol) {
+ case IPPROTO_TCP:
+ return SKNLGRP_INET_TCP_DESTROY;
+ case IPPROTO_UDP:
+ return SKNLGRP_INET_UDP_DESTROY;
+ default:
+ return SKNLGRP_NONE;
+ }
+ case AF_INET6:
+ switch (sk->sk_protocol) {
+ case IPPROTO_TCP:
+ return SKNLGRP_INET6_TCP_DESTROY;
+ case IPPROTO_UDP:
+ return SKNLGRP_INET6_UDP_DESTROY;
+ default:
+ return SKNLGRP_NONE;
+ }
+ default:
+ return SKNLGRP_NONE;
+ }
+}
+
+static inline
+bool sock_diag_has_destroy_listeners(const struct sock *sk)
+{
+ const struct net *n = sock_net(sk);
+ const enum sknetlink_groups group = sock_diag_destroy_group(sk);
+
+ return group != SKNLGRP_NONE && n->diag_nlsk &&
+ netlink_has_listeners(n->diag_nlsk, group);
+}
+void sock_diag_broadcast_destroy(struct sock *sk);
+
#endif
diff --git a/kernel/include/linux/spi/cc2520.h b/kernel/include/linux/spi/cc2520.h
index e741e8baa..85b8ee67e 100644
--- a/kernel/include/linux/spi/cc2520.h
+++ b/kernel/include/linux/spi/cc2520.h
@@ -21,7 +21,6 @@ struct cc2520_platform_data {
int sfd;
int reset;
int vreg;
- bool amplified;
};
#endif
diff --git a/kernel/include/linux/spi/pxa2xx_spi.h b/kernel/include/linux/spi/pxa2xx_spi.h
index 6d36dacec..9ec4c147a 100644
--- a/kernel/include/linux/spi/pxa2xx_spi.h
+++ b/kernel/include/linux/spi/pxa2xx_spi.h
@@ -23,7 +23,6 @@ struct dma_chan;
/* device.platform_data for SSP controller devices */
struct pxa2xx_spi_master {
- u32 clock_enable;
u16 num_chipselect;
u8 enable_dma;
diff --git a/kernel/include/linux/spi/spi.h b/kernel/include/linux/spi/spi.h
index d67307234..cce80e6dc 100644
--- a/kernel/include/linux/spi/spi.h
+++ b/kernel/include/linux/spi/spi.h
@@ -23,6 +23,8 @@
#include <linux/scatterlist.h>
struct dma_chan;
+struct spi_master;
+struct spi_transfer;
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -31,6 +33,63 @@ struct dma_chan;
extern struct bus_type spi_bus_type;
/**
+ * struct spi_statistics - statistics for spi transfers
+ * @lock: lock protecting this structure
+ *
+ * @messages: number of spi-messages handled
+ * @transfers: number of spi_transfers handled
+ * @errors: number of errors during spi_transfer
+ * @timedout: number of timeouts during spi_transfer
+ *
+ * @spi_sync: number of times spi_sync is used
+ * @spi_sync_immediate:
+ * number of times spi_sync is executed immediately
+ * in calling context without queuing and scheduling
+ * @spi_async: number of times spi_async is used
+ *
+ * @bytes: number of bytes transferred to/from device
+ * @bytes_tx: number of bytes sent to device
+ * @bytes_rx: number of bytes received from device
+ *
+ * @transfer_bytes_histo:
+ * transfer bytes histogram
+ */
+struct spi_statistics {
+ spinlock_t lock; /* lock for the whole structure */
+
+ unsigned long messages;
+ unsigned long transfers;
+ unsigned long errors;
+ unsigned long timedout;
+
+ unsigned long spi_sync;
+ unsigned long spi_sync_immediate;
+ unsigned long spi_async;
+
+ unsigned long long bytes;
+ unsigned long long bytes_rx;
+ unsigned long long bytes_tx;
+
+#define SPI_STATISTICS_HISTO_SIZE 17
+ unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+};
+
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+ struct spi_transfer *xfer,
+ struct spi_master *master);
+
+#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
+ do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&(stats)->lock, flags); \
+ (stats)->field += count; \
+ spin_unlock_irqrestore(&(stats)->lock, flags); \
+ } while (0)
+
+#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
+ SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+
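The helper macros above take the statistics lock around each update; a controller driver would use them roughly like this:

static void demo_account(struct spi_master *master, struct spi_transfer *xfer)
{
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, transfers);
	SPI_STATISTICS_ADD_TO_FIELD(&master->statistics, bytes_tx, xfer->len);
}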
+/**
* struct spi_device - Master side proxy for an SPI slave device
* @dev: Driver model representation of the device.
* @master: SPI controller used with the device.
@@ -60,6 +119,8 @@ extern struct bus_type spi_bus_type;
* @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
* when not using a GPIO line)
*
+ * @statistics: statistics for the spi_device
+ *
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
*
@@ -98,6 +159,9 @@ struct spi_device {
char modalias[SPI_NAME_SIZE];
int cs_gpio; /* chip select gpio */
+ /* the statistics */
+ struct spi_statistics statistics;
+
/*
* likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -190,7 +254,7 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}
-extern int spi_register_driver(struct spi_driver *sdrv);
+extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
/**
* spi_unregister_driver - reverse effect of spi_register_driver
@@ -203,6 +267,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define spi_register_driver(driver) \
+ __spi_register_driver(THIS_MODULE, driver)
+
/**
* module_spi_driver() - Helper macro for registering a SPI driver
* @__spi_driver: spi_driver struct
@@ -296,6 +364,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
+ * @statistics: statistics for the spi_master
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -452,6 +521,9 @@ struct spi_master {
/* gpio chip select */
int *cs_gpios;
+ /* statistics */
+ struct spi_statistics statistics;
+
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
@@ -779,8 +851,10 @@ extern int spi_bus_unlock(struct spi_master *master);
* @len: data buffer size
* Context: can sleep
*
- * This writes the buffer and returns zero or a negative error code.
+ * This function writes the buffer @buf.
* Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_write(struct spi_device *spi, const void *buf, size_t len)
@@ -803,8 +877,10 @@ spi_write(struct spi_device *spi, const void *buf, size_t len)
* @len: data buffer size
* Context: can sleep
*
- * This reads the buffer and returns zero or a negative error code.
+ * This function reads the buffer @buf.
* Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len)
@@ -831,7 +907,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
*
* For more specific semantics see spi_sync().
*
- * It returns zero on success, else a negative error code.
- * Return: zero on success, else a negative error code.
*/
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
@@ -855,9 +931,10 @@ extern int spi_write_then_read(struct spi_device *spi,
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) eight bit number returned by the
- * device, or else a negative error code. Callable only from
- * contexts that can sleep.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) eight bit number returned by the
+ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
@@ -876,12 +953,13 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) sixteen bit number returned by the
- * device, or else a negative error code. Callable only from
- * contexts that can sleep.
- *
* The number is returned in wire-order, which is at least sometimes
* big-endian.
+ *
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the
+ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
@@ -900,13 +978,13 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) sixteen bit number returned by the device in cpu
- * endianness, or else a negative error code. Callable only from contexts that
- * can sleep.
- *
* This function is similar to spi_w8r16, with the exception that it will
* convert the read 16 bit data word from big-endian to native endianness.
*
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the device in CPU
+ * endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
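A hedged example of the endianness distinction documented above; the ID command value and register semantics are hypothetical:

static int example_read_id(struct spi_device *spi, u16 *id)
{
	ssize_t ret = spi_w8r16be(spi, 0x0f);	/* assumed ID command */

	if (ret < 0)
		return ret;
	*id = ret;	/* already in CPU endianness, unlike spi_w8r16() */
	return 0;
}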
diff --git a/kernel/include/linux/spi/spi_bitbang.h b/kernel/include/linux/spi/spi_bitbang.h
index 85578d4be..154788ed2 100644
--- a/kernel/include/linux/spi/spi_bitbang.h
+++ b/kernel/include/linux/spi/spi_bitbang.h
@@ -4,7 +4,7 @@
#include <linux/workqueue.h>
struct spi_bitbang {
- spinlock_t lock;
+ struct mutex lock;
u8 busy;
u8 use_dma;
u8 flags; /* extra spi->mode support */
diff --git a/kernel/include/linux/spinlock.h b/kernel/include/linux/spinlock.h
index 28f4366fd..b241cc044 100644
--- a/kernel/include/linux/spinlock.h
+++ b/kernel/include/linux/spinlock.h
@@ -120,7 +120,7 @@ do { \
/*
 * Despite its name it doesn't necessarily have to be a full barrier.
* It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
@@ -130,16 +130,6 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
-/*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifndef smp_mb__after_unlock_lock
-#define smp_mb__after_unlock_lock() do { } while (0)
-#endif
-
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
@@ -304,7 +294,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
@@ -315,17 +305,17 @@ do { \
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
@@ -345,7 +335,7 @@ do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -360,32 +350,32 @@ do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
@@ -395,22 +385,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
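The conversion to __always_inline changes code generation only; callers are untouched. A minimal usage sketch, with hypothetical names:

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_count;

static void example_inc(void)
{
	unsigned long flags;

	/* maps to raw_spin_lock_irqsave() for PREEMPT_RT=n */
	spin_lock_irqsave(&example_lock, flags);
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);
}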
diff --git a/kernel/include/linux/spinlock_rt.h b/kernel/include/linux/spinlock_rt.h
index f757096b2..3b2825537 100644
--- a/kernel/include/linux/spinlock_rt.h
+++ b/kernel/include/linux/spinlock_rt.h
@@ -18,6 +18,10 @@ do { \
__rt_spin_lock_init(slock, #slock, &__key); \
} while (0)
+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
+void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
+int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
+
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
@@ -32,20 +36,16 @@ extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
* lockdep-less calls, for derived types like rwlock:
 * (for trylock they can use rt_mutex_trylock() directly.)
*/
+extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
-#define spin_lock(lock) \
- do { \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
+#define spin_lock(lock) rt_spin_lock(lock)
#define spin_lock_bh(lock) \
do { \
local_bh_disable(); \
- migrate_disable(); \
rt_spin_lock(lock); \
} while (0)
@@ -56,24 +56,19 @@ extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
#define spin_trylock(lock) \
({ \
int __locked; \
- migrate_disable(); \
__locked = spin_do_trylock(lock); \
- if (!__locked) \
- migrate_enable(); \
__locked; \
})
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass) \
do { \
- migrate_disable(); \
rt_spin_lock_nested(lock, subclass); \
} while (0)
#define spin_lock_bh_nested(lock, subclass) \
do { \
local_bh_disable(); \
- migrate_disable(); \
rt_spin_lock_nested(lock, subclass); \
} while (0)
@@ -81,7 +76,6 @@ extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
do { \
typecheck(unsigned long, flags); \
flags = 0; \
- migrate_disable(); \
rt_spin_lock_nested(lock, subclass); \
} while (0)
#else
@@ -117,16 +111,11 @@ static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
/* FIXME: we need rt_spin_lock_nest_lock */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
-#define spin_unlock(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- } while (0)
+#define spin_unlock(lock) rt_spin_unlock(lock)
#define spin_unlock_bh(lock) \
do { \
rt_spin_unlock(lock); \
- migrate_enable(); \
local_bh_enable(); \
} while (0)
diff --git a/kernel/include/linux/spmi.h b/kernel/include/linux/spmi.h
index f84212cd3..1396a255d 100644
--- a/kernel/include/linux/spmi.h
+++ b/kernel/include/linux/spmi.h
@@ -153,7 +153,9 @@ static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
return container_of(d, struct spmi_driver, driver);
}
-int spmi_driver_register(struct spmi_driver *sdrv);
+#define spmi_driver_register(sdrv) \
+ __spmi_driver_register(sdrv, THIS_MODULE)
+int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner);
/**
* spmi_driver_unregister() - unregister an SPMI client driver
diff --git a/kernel/include/linux/srcu.h b/kernel/include/linux/srcu.h
index a9c3c49cd..ec1a8f015 100644
--- a/kernel/include/linux/srcu.h
+++ b/kernel/include/linux/srcu.h
@@ -215,8 +215,11 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
*/
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
- int retval = __srcu_read_lock(sp);
+ int retval;
+ preempt_disable();
+ retval = __srcu_read_lock(sp);
+ preempt_enable();
rcu_lock_acquire(&(sp)->dep_map);
return retval;
}
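The preempt_disable()/preempt_enable() pair above is internal to srcu_read_lock(); readers are unchanged. A minimal sketch, with hypothetical data:

struct example_data { int val; };
static struct example_data __rcu *example_ptr;
DEFINE_STATIC_SRCU(example_srcu);

static int example_read(void)
{
	struct example_data *p;
	int idx, val = -1;

	idx = srcu_read_lock(&example_srcu);
	p = srcu_dereference(example_ptr, &example_srcu);
	if (p)
		val = p->val;
	srcu_read_unlock(&example_srcu, idx);
	return val;
}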
diff --git a/kernel/include/linux/ssb/ssb.h b/kernel/include/linux/ssb/ssb.h
index 4568a5cc9..c3d1a525b 100644
--- a/kernel/include/linux/ssb/ssb.h
+++ b/kernel/include/linux/ssb/ssb.h
@@ -29,10 +29,13 @@ struct ssb_sprom {
u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */
u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */
u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */
+ u8 et2mac[6] __aligned(sizeof(u16)); /* MAC address for extra Ethernet */
u8 et0phyaddr; /* MII address for enet0 */
u8 et1phyaddr; /* MII address for enet1 */
+ u8 et2phyaddr; /* MII address for enet2 */
u8 et0mdcport; /* MDIO for enet0 */
u8 et1mdcport; /* MDIO for enet1 */
+ u8 et2mdcport; /* MDIO for enet2 */
u16 dev_id; /* Device ID overriding e.g. PCI ID */
u16 board_rev; /* Board revision number from SPROM. */
u16 board_num; /* Board number from SPROM. */
@@ -88,11 +91,14 @@ struct ssb_sprom {
u32 ofdm5glpo; /* 5.2GHz OFDM power offset */
u32 ofdm5gpo; /* 5.3GHz OFDM power offset */
u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */
+ u32 boardflags;
+ u32 boardflags2;
+ u32 boardflags3;
+ /* TODO: Switch all drivers to new u32 fields and drop below ones */
u16 boardflags_lo; /* Board flags (bits 0-15) */
u16 boardflags_hi; /* Board flags (bits 16-31) */
u16 boardflags2_lo; /* Board flags (bits 32-47) */
u16 boardflags2_hi; /* Board flags (bits 48-63) */
- /* TODO store board flags in a single u64 */
struct ssb_sprom_core_pwr_info core_pwr_info[4];
diff --git a/kernel/include/linux/stddef.h b/kernel/include/linux/stddef.h
index 076af4372..9c61c7cda 100644
--- a/kernel/include/linux/stddef.h
+++ b/kernel/include/linux/stddef.h
@@ -3,7 +3,6 @@
#include <uapi/linux/stddef.h>
-
#undef NULL
#define NULL ((void *)0)
@@ -14,10 +13,9 @@ enum {
#undef offsetof
#ifdef __compiler_offsetof
-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
#else
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
+#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#endif
/**
@@ -28,3 +26,5 @@ enum {
*/
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
+
+#endif
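
A worked example of the two macros, using a hypothetical struct:

struct example_hdr {
	u32 magic;	/* offsetof(struct example_hdr, magic) == 0 */
	u16 len;	/* offsetof(struct example_hdr, len)   == 4 */
};
/* offsetofend(struct example_hdr, len) == 4 + sizeof(u16) == 6 */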
diff --git a/kernel/include/linux/stm.h b/kernel/include/linux/stm.h
new file mode 100644
index 000000000..9d0083d36
--- /dev/null
+++ b/kernel/include/linux/stm.h
@@ -0,0 +1,126 @@
+/*
+ * System Trace Module (STM) infrastructure apis
+ * Copyright (C) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _STM_H_
+#define _STM_H_
+
+#include <linux/device.h>
+
+/**
+ * enum stp_packet_type - STP packets that an STM driver sends
+ */
+enum stp_packet_type {
+ STP_PACKET_DATA = 0,
+ STP_PACKET_FLAG,
+ STP_PACKET_USER,
+ STP_PACKET_MERR,
+ STP_PACKET_GERR,
+ STP_PACKET_TRIG,
+ STP_PACKET_XSYNC,
+};
+
+/**
+ * enum stp_packet_flags - STP packet modifiers
+ */
+enum stp_packet_flags {
+ STP_PACKET_MARKED = 0x1,
+ STP_PACKET_TIMESTAMPED = 0x2,
+};
+
+struct stp_policy;
+
+struct stm_device;
+
+/**
+ * struct stm_data - STM device description and callbacks
+ * @name: device name
+ * @stm: internal structure, only used by stm class code
+ * @sw_start: first STP master available to software
+ * @sw_end: last STP master available to software
+ * @sw_nchannels: number of STP channels per master
+ * @sw_mmiosz: size of one channel's IO space, for mmap, optional
+ * @packet: callback that sends an STP packet
+ * @mmio_addr: mmap callback, optional
+ * @link: called when a new stm_source gets linked to us, optional
+ * @unlink: likewise for unlinking, again optional
+ * @set_options: set device-specific options on a channel
+ *
+ * Fill out this structure before calling stm_register_device() to create
+ * an STM device and stm_unregister_device() to destroy it. It will also be
+ * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options()
+ * callbacks.
+ *
+ * Normally, an STM device will have a range of masters available to
+ * software, with the rest statically assigned to various hardware trace
+ * sources. The former is defined by the range [@sw_start..@sw_end] of
+ * the device description. That is, the lowest master that can be
+ * allocated to software writers is @sw_start, and data from such a
+ * writer will appear as master @sw_start in the STP stream.
+ */
+struct stm_data {
+ const char *name;
+ struct stm_device *stm;
+ unsigned int sw_start;
+ unsigned int sw_end;
+ unsigned int sw_nchannels;
+ unsigned int sw_mmiosz;
+ ssize_t (*packet)(struct stm_data *, unsigned int,
+ unsigned int, unsigned int,
+ unsigned int, unsigned int,
+ const unsigned char *);
+ phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int,
+ unsigned int, unsigned int);
+ int (*link)(struct stm_data *, unsigned int,
+ unsigned int);
+ void (*unlink)(struct stm_data *, unsigned int,
+ unsigned int);
+ long (*set_options)(struct stm_data *, unsigned int,
+ unsigned int, unsigned int,
+ unsigned long);
+};
+
+int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ struct module *owner);
+void stm_unregister_device(struct stm_data *stm_data);
+
+struct stm_source_device;
+
+/**
+ * struct stm_source_data - STM source device description and callbacks
+ * @name: device name, will be used for policy lookup
+ * @src: internal structure, only used by stm class code
+ * @nr_chans: number of channels to allocate
+ * @link: called when this source gets linked to an STM device
+ * @unlink: called when this source is about to get unlinked from its STM
+ *
+ * Fill in this structure before calling stm_source_register_device() to
+ * register a source device. Also pass it to unregister and write calls.
+ */
+struct stm_source_data {
+ const char *name;
+ struct stm_source_device *src;
+ unsigned int percpu;
+ unsigned int nr_chans;
+ int (*link)(struct stm_source_data *data);
+ void (*unlink)(struct stm_source_data *data);
+};
+
+int stm_source_register_device(struct device *parent,
+ struct stm_source_data *data);
+void stm_source_unregister_device(struct stm_source_data *data);
+
+int stm_source_write(struct stm_source_data *data, unsigned int chan,
+ const char *buf, size_t count);
+
+#endif /* _STM_H_ */
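
A hedged sketch of an stm_source client built on the API above; all names are hypothetical:

static struct stm_source_data example_src = {
	.name		= "example",	/* used for policy lookup */
	.nr_chans	= 1,
};

static int __init example_init(void)
{
	return stm_source_register_device(NULL, &example_src);
}

static void example_trace(const char *msg, size_t len)
{
	/* write to channel 0 of whatever STM device we get linked to */
	stm_source_write(&example_src, 0, msg, len);
}

static void __exit example_exit(void)
{
	stm_source_unregister_device(&example_src);
}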
diff --git a/kernel/include/linux/stmmac.h b/kernel/include/linux/stmmac.h
index 7f484a239..eead8ab93 100644
--- a/kernel/include/linux/stmmac.h
+++ b/kernel/include/linux/stmmac.h
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
int phy_addr;
int interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
+ struct device_node *phy_node;
struct stmmac_dma_cfg *dma_cfg;
int clk_csr;
int has_gmac;
@@ -118,30 +119,8 @@ struct plat_stmmacenet_data {
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void *custom_cfg;
- void *custom_data;
void *bsp_priv;
};
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
- int has_gmac;
- int enh_desc;
- int tx_coe;
- int rx_coe;
- int bugged_jumbo;
- int pmt;
- int riwt_off;
- void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
-};
#endif
diff --git a/kernel/include/linux/stop_machine.h b/kernel/include/linux/stop_machine.h
index d2abbdb8c..0e1b15405 100644
--- a/kernel/include/linux/stop_machine.h
+++ b/kernel/include/linux/stop_machine.h
@@ -33,6 +33,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+void stop_machine_park(int cpu);
+void stop_machine_unpark(int cpu);
#else /* CONFIG_SMP */
@@ -97,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
* grabbing every spinlock (and more). So the "read" side to such a
* lock is anything which disables preemption.
*/
-#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
@@ -112,25 +114,13 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
*
* This can be thought of as a very heavy write lock, equivalent to
* grabbing every spinlock in the kernel. */
-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
-/**
- * __stop_machine: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Description: This is a special version of the above, which assumes cpus
- * won't come or go while it's being called. Used by hotplug cpu.
- */
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
+#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
-#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
-
-static inline int __stop_machine(int (*fn)(void *), void *data,
+static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
unsigned long flags;
@@ -141,17 +131,11 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
return ret;
}
-static inline int stop_machine(int (*fn)(void *), void *data,
- const struct cpumask *cpus)
-{
- return __stop_machine(fn, data, cpus);
-}
-
-static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
- return __stop_machine(fn, data, cpus);
+ return stop_machine(fn, data, cpus);
}
-#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */
diff --git a/kernel/include/linux/string.h b/kernel/include/linux/string.h
index e40099e58..9ef7795e6 100644
--- a/kernel/include/linux/string.h
+++ b/kernel/include/linux/string.h
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
@@ -111,6 +114,7 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
#endif
void *memchr_inv(const void *s, int c, size_t n);
+char *strreplace(char *s, char old, char new);
extern void kfree_const(const void *x);
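
A hedged sketch contrasting the new helpers: strscpy() reports truncation via -E2BIG (where strlcpy() returns the untruncated source length), and strreplace() substitutes every occurrence of one character:

static int example_copy_name(char *dst, size_t dstsz, const char *src)
{
	ssize_t n = strscpy(dst, src, dstsz);

	if (n < 0)
		return n;		/* -E2BIG: src did not fit */
	strreplace(dst, ' ', '_');	/* e.g. normalize for sysfs names */
	return 0;
}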
diff --git a/kernel/include/linux/string_helpers.h b/kernel/include/linux/string_helpers.h
index 71f711db4..dabe643eb 100644
--- a/kernel/include/linux/string_helpers.h
+++ b/kernel/include/linux/string_helpers.h
@@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_HEX 0x20
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
- unsigned int flags, const char *esc);
+ unsigned int flags, const char *only);
static inline int string_escape_mem_any_np(const char *src, size_t isz,
- char *dst, size_t osz, const char *esc)
+ char *dst, size_t osz, const char *only)
{
- return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
+ return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only);
}
static inline int string_escape_str(const char *src, char *dst, size_t sz,
- unsigned int flags, const char *esc)
+ unsigned int flags, const char *only)
{
- return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
+ return string_escape_mem(src, strlen(src), dst, sz, flags, only);
}
static inline int string_escape_str_any_np(const char *src, char *dst,
- size_t sz, const char *esc)
+ size_t sz, const char *only)
{
- return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
+ return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
#endif
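
A hedged usage sketch of the renamed parameter: with @only == NULL every character matching the flags is escaped, while a non-NULL @only restricts escaping to the characters it lists (kernel context assumed):

static void example_dump(const char *raw)
{
	char out[128];
	int n = string_escape_str(raw, out, sizeof(out), ESCAPE_ANY_NP, NULL);

	if (n >= 0 && n < (int)sizeof(out)) {
		out[n] = '\0';		/* output is not NUL-terminated */
		pr_info("escaped: %s\n", out);
	}
}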
diff --git a/kernel/include/linux/sunrpc/addr.h b/kernel/include/linux/sunrpc/addr.h
index 07d8e53be..5c9c6cd08 100644
--- a/kernel/include/linux/sunrpc/addr.h
+++ b/kernel/include/linux/sunrpc/addr.h
@@ -46,8 +46,8 @@ static inline void rpc_set_port(struct sockaddr *sap,
#define IPV6_SCOPE_DELIMITER '%'
#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
-static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
{
const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
@@ -67,8 +67,8 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
{
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
@@ -93,7 +93,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
return true;
}
#else /* !IS_ENABLED(CONFIG_IPV6) */
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
const struct sockaddr *sap2)
{
return false;
@@ -122,15 +122,28 @@ static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
if (sap1->sa_family == sap2->sa_family) {
switch (sap1->sa_family) {
case AF_INET:
- return __rpc_cmp_addr4(sap1, sap2);
+ return rpc_cmp_addr4(sap1, sap2);
case AF_INET6:
- return __rpc_cmp_addr6(sap1, sap2);
+ return rpc_cmp_addr6(sap1, sap2);
}
}
return false;
}
/**
+ * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ */
+static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ if (!rpc_cmp_addr(sap1, sap2))
+ return false;
+ return rpc_get_port(sap1) == rpc_get_port(sap2);
+}
+
+/**
* rpc_copy_addr - copy the address portion of one sockaddr to another
* @dst: destination sockaddr
* @src: source sockaddr
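
A hedged illustration of the new rpc_cmp_addr_port() above, e.g. when deciding whether an existing transport already points at a given server; names are hypothetical:

static bool example_same_server(const struct sockaddr *a,
				const struct sockaddr *b,
				bool require_port)
{
	/* family + address must match; optionally the port as well */
	return require_port ? rpc_cmp_addr_port(a, b) : rpc_cmp_addr(a, b);
}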
diff --git a/kernel/include/linux/sunrpc/auth.h b/kernel/include/linux/sunrpc/auth.h
index a7cbb570c..1ecf13e14 100644
--- a/kernel/include/linux/sunrpc/auth.h
+++ b/kernel/include/linux/sunrpc/auth.h
@@ -18,9 +18,13 @@
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
+#include <linux/utsname.h>
-/* size of the nodename buffer */
-#define UNX_MAXNODENAME 32
+/*
+ * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
+ * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
+ */
+#define UNX_MAXNODENAME __NEW_UTS_LEN
struct rpcsec_gss_info;
diff --git a/kernel/include/linux/sunrpc/bc_xprt.h b/kernel/include/linux/sunrpc/bc_xprt.h
index 2ca67b55e..4397a4824 100644
--- a/kernel/include/linux/sunrpc/bc_xprt.h
+++ b/kernel/include/linux/sunrpc/bc_xprt.h
@@ -37,7 +37,11 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
-int bc_send(struct rpc_rqst *req);
+
+/* Socket backchannel transport methods */
+int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs);
+void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs);
+void xprt_free_bc_rqst(struct rpc_rqst *req);
/*
* Determine if a shared backchannel is in use
diff --git a/kernel/include/linux/sunrpc/cache.h b/kernel/include/linux/sunrpc/cache.h
index 437ddb6c4..ed03c9f7f 100644
--- a/kernel/include/linux/sunrpc/cache.h
+++ b/kernel/include/linux/sunrpc/cache.h
@@ -46,10 +46,12 @@
*
*/
struct cache_head {
- struct cache_head * next;
+ struct hlist_node cache_list;
	time_t		expiry_time;	/* After this time, don't use the data */
- time_t last_refresh; /* If CACHE_PENDING, this is when upcall
- * was sent, else this is when update was received
+ time_t last_refresh; /* If CACHE_PENDING, this is when upcall was
+ * sent, else this is when update was
+ * received, though it is always set to
+ * be *after* ->flush_time.
*/
struct kref ref;
unsigned long flags;
@@ -73,7 +75,7 @@ struct cache_detail_pipefs {
struct cache_detail {
struct module * owner;
int hash_size;
- struct cache_head ** hash_table;
+ struct hlist_head * hash_table;
rwlock_t hash_lock;
atomic_t inuse; /* active user-space update or lookup */
@@ -105,8 +107,12 @@ struct cache_detail {
/* fields below this comment are for internal use
* and should not be touched by cache owners
*/
- time_t flush_time; /* flush all cache items with last_refresh
- * earlier than this */
+ time_t flush_time; /* flush all cache items with
+ * last_refresh at or earlier
+ * than this. last_refresh
+ * is never set at or earlier
+ * than this.
+ */
struct list_head others;
time_t nextcheck;
int entries;
@@ -203,7 +209,7 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
return (h->expiry_time < seconds_since_boot()) ||
- (detail->flush_time > h->last_refresh);
+ (detail->flush_time >= h->last_refresh);
}
extern int cache_check(struct cache_detail *detail,
@@ -224,6 +230,11 @@ extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
umode_t, struct cache_detail *);
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+/* Must store cache_detail in seq_file->private if using next three functions */
+extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop(struct seq_file *file, void *p);
+
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
diff --git a/kernel/include/linux/sunrpc/clnt.h b/kernel/include/linux/sunrpc/clnt.h
index 598ba80ec..131032f15 100644
--- a/kernel/include/linux/sunrpc/clnt.h
+++ b/kernel/include/linux/sunrpc/clnt.h
@@ -56,6 +56,7 @@ struct rpc_clnt {
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
+ atomic_t cl_swapper; /* swapfile count */
int cl_nodelen; /* nodename length */
char cl_nodename[UNX_MAXNODENAME+1];
struct rpc_pipe_dir_head cl_pipedir_objects;
diff --git a/kernel/include/linux/sunrpc/sched.h b/kernel/include/linux/sunrpc/sched.h
index 5f1e6bd4c..d703f0ef3 100644
--- a/kernel/include/linux/sunrpc/sched.h
+++ b/kernel/include/linux/sunrpc/sched.h
@@ -205,8 +205,7 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
- const struct rpc_call_ops *ops);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
@@ -269,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
}
#endif
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+int rpc_clnt_swap_activate(struct rpc_clnt *clnt);
+void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt);
+#else
+static inline int
+rpc_clnt_swap_activate(struct rpc_clnt *clnt)
+{
+ return -EINVAL;
+}
+
+static inline void
+rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+{
+}
+#endif /* CONFIG_SUNRPC_SWAP */
+
#endif /* _LINUX_SUNRPC_SCHED_H_ */
diff --git a/kernel/include/linux/sunrpc/svc.h b/kernel/include/linux/sunrpc/svc.h
index fae6fb947..cc0fc712b 100644
--- a/kernel/include/linux/sunrpc/svc.h
+++ b/kernel/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
#include <linux/wait.h>
#include <linux/mm.h>
-/*
- * This is the RPC server thread function prototype
- */
-typedef int (*svc_thread_fn)(void *);
-
/* statistics for svc_pool structures */
struct svc_pool_stats {
atomic_long_t packets;
@@ -54,6 +49,25 @@ struct svc_pool {
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
+struct svc_serv;
+
+struct svc_serv_ops {
+ /* Callback to use when last thread exits. */
+ void (*svo_shutdown)(struct svc_serv *, struct net *);
+
+ /* function for service threads to run */
+ int (*svo_function)(void *);
+
+ /* queue up a transport for servicing */
+ void (*svo_enqueue_xprt)(struct svc_xprt *);
+
+ /* set up thread (or whatever) execution context */
+ int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+
+ /* optional module to count when adding threads (pooled svcs only) */
+ struct module *svo_module;
+};
+
/*
* RPC service.
*
@@ -85,16 +99,7 @@ struct svc_serv {
unsigned int sv_nrpools; /* number of thread pools */
struct svc_pool * sv_pools; /* array of thread pools */
-
- void (*sv_shutdown)(struct svc_serv *serv,
- struct net *net);
- /* Callback to use when last thread
- * exits.
- */
-
- struct module * sv_module; /* optional module to count when
- * adding threads */
- svc_thread_fn sv_function; /* main function for threads */
+ struct svc_serv_ops *sv_ops; /* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
@@ -423,19 +428,46 @@ struct svc_procedure {
};
/*
+ * Mode for mapping cpus to pools.
+ */
+enum {
+ SVC_POOL_AUTO = -1, /* choose one of the others */
+ SVC_POOL_GLOBAL, /* no mapping, just a single global pool
+ * (legacy & UP mode) */
+ SVC_POOL_PERCPU, /* one pool per cpu */
+ SVC_POOL_PERNODE /* one pool per numa node */
+};
+
+struct svc_pool_map {
+ int count; /* How many svc_servs use us */
+ int mode; /* Note: int not enum to avoid
+ * warnings about "enumeration value
+ * not handled in switch" */
+ unsigned int npools;
+ unsigned int *pool_to; /* maps pool id to cpu or node */
+ unsigned int *to_pool; /* maps cpu or node to pool id */
+};
+
+extern struct svc_pool_map svc_pool_map;
+
+/*
* Function prototypes.
*/
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv *, struct net *net));
+ struct svc_serv_ops *);
+struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
+ struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
struct svc_pool *pool, int node);
+void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
+unsigned int svc_pool_map_get(void);
+void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv *, struct net *net),
- svc_thread_fn, struct module *);
+ struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
diff --git a/kernel/include/linux/sunrpc/svc_rdma.h b/kernel/include/linux/sunrpc/svc_rdma.h
index df8edf8ec..f869807a0 100644
--- a/kernel/include/linux/sunrpc/svc_rdma.h
+++ b/kernel/include/linux/sunrpc/svc_rdma.h
@@ -105,11 +105,9 @@ struct svc_rdma_chunk_sge {
};
struct svc_rdma_fastreg_mr {
struct ib_mr *mr;
- void *kva;
- struct ib_fast_reg_page_list *page_list;
- int page_list_len;
+ struct scatterlist *sg;
+ int sg_nents;
unsigned long access_flags;
- unsigned long map_len;
enum dma_data_direction direction;
struct list_head frmr_list;
};
@@ -132,6 +130,7 @@ struct svcxprt_rdma {
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
int sc_max_sge;
+ int sc_max_sge_rd; /* max sge for read target */
int sc_sq_depth; /* Depth of SQ */
atomic_t sc_sq_count; /* Number of SQ WR on queue */
@@ -180,12 +179,13 @@ struct svcxprt_rdma {
#define RPCRDMA_MAX_REQUESTS 32
#define RPCRDMA_MAX_REQ_SIZE 4096
+#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+
/* svc_rdma_marshal.c */
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
-extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
struct rpcrdma_msg *,
- enum rpcrdma_errcode, u32 *);
+ enum rpcrdma_errcode, __be32 *);
extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
@@ -207,12 +207,13 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);
+extern struct rpcrdma_read_chunk *
+ svc_rdma_get_read_chunk(struct rpcrdma_msg *);
/* svc_rdma_transport.c */
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
enum rpcrdma_errcode);
-struct page *svc_rdma_get_page(void);
extern int svc_rdma_post_recv(struct svcxprt_rdma *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
@@ -220,96 +221,20 @@ extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
-extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
struct svc_rdma_fastreg_mr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
-extern struct svc_xprt_class svc_rdma_class;
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
+extern struct svc_xprt_class svc_rdma_class;
+#ifdef CONFIG_SUNRPC_BACKCHANNEL
+extern struct svc_xprt_class svc_rdma_bc_class;
+#endif
+
/* svc_rdma.c */
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
-/*
- * Returns the address of the first read chunk or <nul> if no read chunk is
- * present
- */
-static inline struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
-{
- struct rpcrdma_read_chunk *ch =
- (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-
- if (ch->rc_discrim == 0)
- return NULL;
-
- return ch;
-}
-
-/*
- * Returns the address of the first read write array element or <nul> if no
- * write array list is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
-{
- if (rmsgp->rm_body.rm_chunks[0] != 0
- || rmsgp->rm_body.rm_chunks[1] == 0)
- return NULL;
-
- return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
-}
-
-/*
- * Returns the address of the first reply array element or <nul> if no
- * reply array is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
-{
- struct rpcrdma_read_chunk *rch;
- struct rpcrdma_write_array *wr_ary;
- struct rpcrdma_write_array *rp_ary;
-
- /* XXX: Need to fix when reply list may occur with read-list and/or
- * write list */
- if (rmsgp->rm_body.rm_chunks[0] != 0 ||
- rmsgp->rm_body.rm_chunks[1] != 0)
- return NULL;
-
- rch = svc_rdma_get_read_chunk(rmsgp);
- if (rch) {
- while (rch->rc_discrim)
- rch++;
-
- /* The reply list follows an empty write array located
- * at 'rc_position' here. The reply array is at rc_target.
- */
- rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
-
- goto found_it;
- }
-
- wr_ary = svc_rdma_get_write_array(rmsgp);
- if (wr_ary) {
- rp_ary = (struct rpcrdma_write_array *)
- &wr_ary->
- wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
-
- goto found_it;
- }
-
- /* No read list, no write list */
- rp_ary = (struct rpcrdma_write_array *)
- &rmsgp->rm_body.rm_chunks[2];
-
- found_it:
- if (rp_ary->wc_discrim == 0)
- return NULL;
-
- return rp_ary;
-}
#endif
diff --git a/kernel/include/linux/sunrpc/svc_xprt.h b/kernel/include/linux/sunrpc/svc_xprt.h
index 79f6f8f3d..78512cfe1 100644
--- a/kernel/include/linux/sunrpc/svc_xprt.h
+++ b/kernel/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@ void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
int svc_create_xprt(struct svc_serv *, const char *, struct net *,
const int, const unsigned short, int);
+void svc_xprt_do_enqueue(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/kernel/include/linux/sunrpc/xprt.h b/kernel/include/linux/sunrpc/xprt.h
index 8b93ef53d..69ef5b3ab 100644
--- a/kernel/include/linux/sunrpc/xprt.h
+++ b/kernel/include/linux/sunrpc/xprt.h
@@ -54,6 +54,8 @@ enum rpc_display_format_t {
struct rpc_task;
struct rpc_xprt;
struct seq_file;
+struct svc_serv;
+struct net;
/*
* This describes a complete RPC request
@@ -133,6 +135,15 @@ struct rpc_xprt_ops {
void (*close)(struct rpc_xprt *xprt);
void (*destroy)(struct rpc_xprt *xprt);
void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
+ int (*enable_swap)(struct rpc_xprt *xprt);
+ void (*disable_swap)(struct rpc_xprt *xprt);
+ void (*inject_disconnect)(struct rpc_xprt *xprt);
+ int (*bc_setup)(struct rpc_xprt *xprt,
+ unsigned int min_reqs);
+ int (*bc_up)(struct svc_serv *serv, struct net *net);
+ void (*bc_free_rqst)(struct rpc_rqst *rqst);
+ void (*bc_destroy)(struct rpc_xprt *xprt,
+ unsigned int max_reqs);
};
/*
@@ -150,6 +161,7 @@ enum xprt_transports {
XPRT_TRANSPORT_TCP = IPPROTO_TCP,
XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_RDMA = 256,
+ XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_LOCAL = 257,
};
@@ -180,7 +192,7 @@ struct rpc_xprt {
atomic_t num_reqs; /* total slots */
unsigned long state; /* transport state */
unsigned char resvport : 1; /* use a reserved port */
- unsigned int swapper; /* we're swapping over this
+ atomic_t swapper; /* we're swapping over this
transport */
unsigned int bind_index; /* bind function index */
@@ -212,7 +224,8 @@ struct rpc_xprt {
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct svc_serv *bc_serv; /* The RPC service which will */
/* process the callback */
- unsigned int bc_alloc_count; /* Total number of preallocs */
+ int bc_alloc_count; /* Total number of preallocs */
+ atomic_t bc_free_slots;
spinlock_t bc_pa_lock; /* Protects the preallocated
* items */
struct list_head bc_pa_list; /* List of preallocated
@@ -241,6 +254,7 @@ struct rpc_xprt {
const char *address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *debugfs; /* debugfs directory */
+ atomic_t inject_disconnect;
#endif
};
@@ -327,6 +341,18 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
return p + xprt->tsh_size;
}
+static inline int
+xprt_enable_swap(struct rpc_xprt *xprt)
+{
+ return xprt->ops->enable_swap(xprt);
+}
+
+static inline void
+xprt_disable_swap(struct rpc_xprt *xprt)
+{
+ xprt->ops->disable_swap(xprt);
+}
+
/*
* Transport switch helper functions
*/
@@ -345,7 +371,6 @@ void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
-int xs_swapper(struct rpc_xprt *xprt, int enable);
bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void xprt_unlock_connect(struct rpc_xprt *, void *);
@@ -431,6 +456,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+extern unsigned int rpc_inject_disconnect;
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+ if (!rpc_inject_disconnect)
+ return;
+ if (atomic_dec_return(&xprt->inject_disconnect))
+ return;
+ atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
+ xprt->ops->inject_disconnect(xprt);
+}
+#else
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+}
+#endif
+
#endif /* __KERNEL__*/
#endif /* _LINUX_SUNRPC_XPRT_H */
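
A worked trace of the injection cadence above; the counter value is hypothetical:

/* With rpc_inject_disconnect == 3:
 *   call 1: atomic_dec_return(): 3 -> 2, non-zero, return
 *   call 2: atomic_dec_return(): 2 -> 1, non-zero, return
 *   call 3: atomic_dec_return(): 1 -> 0, counter reset to 3,
 *           then ->inject_disconnect(xprt) forces a disconnect
 */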
diff --git a/kernel/include/linux/sunrpc/xprtrdma.h b/kernel/include/linux/sunrpc/xprtrdma.h
index c984c8598..b7b279b54 100644
--- a/kernel/include/linux/sunrpc/xprtrdma.h
+++ b/kernel/include/linux/sunrpc/xprtrdma.h
@@ -49,14 +49,15 @@
* a single chunk type per message is supported currently.
*/
#define RPCRDMA_MIN_SLOT_TABLE (2U)
-#define RPCRDMA_DEF_SLOT_TABLE (32U)
+#define RPCRDMA_DEF_SLOT_TABLE (128U)
#define RPCRDMA_MAX_SLOT_TABLE (256U)
#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
-/* memory registration strategies */
+/* Memory registration strategies, by number.
+ * This is part of a kernel / user space API. Do not remove. */
enum rpcrdma_memreg {
RPCRDMA_BOUNCEBUFFERS = 0,
RPCRDMA_REGISTER,
diff --git a/kernel/include/linux/sunrpc/xprtsock.h b/kernel/include/linux/sunrpc/xprtsock.h
index 357e44c1a..0ece4ba06 100644
--- a/kernel/include/linux/sunrpc/xprtsock.h
+++ b/kernel/include/linux/sunrpc/xprtsock.h
@@ -44,6 +44,8 @@ struct sock_xprt {
*/
unsigned long sock_state;
struct delayed_work connect_worker;
+ struct work_struct recv_worker;
+ struct mutex recv_mutex;
struct sockaddr_storage srcaddr;
unsigned short srcport;
diff --git a/kernel/include/linux/sunxi-rsb.h b/kernel/include/linux/sunxi-rsb.h
new file mode 100644
index 000000000..7e75bb034
--- /dev/null
+++ b/kernel/include/linux/sunxi-rsb.h
@@ -0,0 +1,105 @@
+/*
+ * Allwinner Reduced Serial Bus Driver
+ *
+ * Copyright (c) 2015 Chen-Yu Tsai
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _SUNXI_RSB_H
+#define _SUNXI_RSB_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct sunxi_rsb;
+
+/**
+ * struct sunxi_rsb_device - Basic representation of an RSB device
+ * @dev: Driver model representation of the device.
+ * @rsb: RSB controller managing the bus hosting this device.
+ * @irq: This device's interrupt line
+ * @rtaddr: This device's runtime address
+ * @hwaddr: This device's hardware address
+ */
+struct sunxi_rsb_device {
+ struct device dev;
+ struct sunxi_rsb *rsb;
+ int irq;
+ u8 rtaddr;
+ u16 hwaddr;
+};
+
+static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d)
+{
+ return container_of(d, struct sunxi_rsb_device, dev);
+}
+
+static inline void *sunxi_rsb_device_get_drvdata(const struct sunxi_rsb_device *rdev)
+{
+ return dev_get_drvdata(&rdev->dev);
+}
+
+static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev,
+ void *data)
+{
+ dev_set_drvdata(&rdev->dev, data);
+}
+
+/**
+ * struct sunxi_rsb_driver - RSB slave device driver
+ * @driver: RSB device drivers should initialize name and owner field of
+ * this structure.
+ * @probe: binds this driver to a RSB device.
+ * @remove: unbinds this driver from the RSB device.
+ */
+struct sunxi_rsb_driver {
+ struct device_driver driver;
+ int (*probe)(struct sunxi_rsb_device *rdev);
+ int (*remove)(struct sunxi_rsb_device *rdev);
+};
+
+static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d)
+{
+ return container_of(d, struct sunxi_rsb_driver, driver);
+}
+
+int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv);
+
+/**
+ * sunxi_rsb_driver_unregister() - unregister an RSB client driver
+ * @rdrv: the driver to unregister
+ */
+static inline void sunxi_rsb_driver_unregister(struct sunxi_rsb_driver *rdrv)
+{
+ if (rdrv)
+ driver_unregister(&rdrv->driver);
+}
+
+#define module_sunxi_rsb_driver(__sunxi_rsb_driver) \
+ module_driver(__sunxi_rsb_driver, sunxi_rsb_driver_register, \
+ sunxi_rsb_driver_unregister)
+
+struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+/**
+ * devm_regmap_init_sunxi_rsb(): Initialise managed register map
+ *
+ * @rdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sunxi_rsb(rdev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sunxi_rsb, #config, \
+ rdev, config)
+
+#endif /* _SUNXI_RSB_H */
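
A hedged skeleton of an RSB slave driver against the API above; the register layout and all names are hypothetical:

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0xff,
};

static int example_probe(struct sunxi_rsb_device *rdev)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_sunxi_rsb(rdev, &example_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	sunxi_rsb_device_set_drvdata(rdev, regmap);
	return 0;
}

static struct sunxi_rsb_driver example_rsb_driver = {
	.driver = {
		.name = "example-rsb",
	},
	.probe = example_probe,
};
module_sunxi_rsb_driver(example_rsb_driver);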
diff --git a/kernel/include/linux/suspend.h b/kernel/include/linux/suspend.h
index 5efe743ce..9b77d4cc9 100644
--- a/kernel/include/linux/suspend.h
+++ b/kernel/include/linux/suspend.h
@@ -194,6 +194,12 @@ struct platform_freeze_ops {
void (*end)(void);
};
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
+extern bool pm_in_action;
+#else
+# define pm_in_action false
+#endif
+
#ifdef CONFIG_SUSPEND
/**
* suspend_set_ops - set platform dependent suspend operations
@@ -202,6 +208,36 @@ struct platform_freeze_ops {
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);
+extern unsigned int pm_suspend_global_flags;
+
+#define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0)
+#define PM_SUSPEND_FLAG_FW_RESUME (1 << 1)
+
+static inline void pm_suspend_clear_flags(void)
+{
+ pm_suspend_global_flags = 0;
+}
+
+static inline void pm_set_suspend_via_firmware(void)
+{
+ pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND;
+}
+
+static inline void pm_set_resume_via_firmware(void)
+{
+ pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
+}
+
+static inline bool pm_suspend_via_firmware(void)
+{
+ return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND);
+}
+
+static inline bool pm_resume_via_firmware(void)
+{
+ return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
+}
+
/* Suspend-to-idle state machine. */
enum freeze_state {
FREEZE_STATE_NONE, /* Not suspended/suspending. */
@@ -241,6 +277,12 @@ extern int pm_suspend(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
+static inline void pm_suspend_clear_flags(void) {}
+static inline void pm_set_suspend_via_firmware(void) {}
+static inline void pm_set_resume_via_firmware(void) {}
+static inline bool pm_suspend_via_firmware(void) { return false; }
+static inline bool pm_resume_via_firmware(void) { return false; }
+
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool idle_should_freeze(void) { return false; }
@@ -387,10 +429,12 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
+extern unsigned int pm_wakeup_irq;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_wakeup_clear(void);
+extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
@@ -440,6 +484,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(void) {}
+static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
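
A hedged sketch of the intended flow for the new flag helpers: the platform suspend path records how the transition was performed, and drivers may branch on it at resume. example_firmware_handles_sleep() is an assumed platform query:

static bool example_firmware_handles_sleep(void) { return false; }

static int example_suspend_enter(suspend_state_t state)
{
	pm_suspend_clear_flags();
	if (example_firmware_handles_sleep())
		pm_set_suspend_via_firmware();
	/* ... enter the sleep state; on the way back out the platform
	 * code would call pm_set_resume_via_firmware() analogously ... */
	return 0;
}

static void example_driver_resume(void)
{
	if (pm_resume_via_firmware())
		return;	/* firmware already restored device context */
	/* ... otherwise re-initialize the device in full ... */
}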
diff --git a/kernel/include/linux/sw842.h b/kernel/include/linux/sw842.h
new file mode 100644
index 000000000..109ba041c
--- /dev/null
+++ b/kernel/include/linux/sw842.h
@@ -0,0 +1,12 @@
+#ifndef __SW842_H__
+#define __SW842_H__
+
+#define SW842_MEM_COMPRESS (0xf000)
+
+int sw842_compress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen, void *wmem);
+
+int sw842_decompress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen);
+
+#endif
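
A hedged round-trip sketch through the software 842 codec; buffer sizing is simplified and <linux/slab.h> is assumed:

static int example_842_roundtrip(const u8 *in, unsigned int inlen,
				 u8 *out, unsigned int outsz)
{
	unsigned int clen = outsz, dlen = outsz;
	void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	u8 *comp = kmalloc(outsz, GFP_KERNEL);
	int ret = -ENOMEM;

	if (!wmem || !comp)
		goto out;

	ret = sw842_compress(in, inlen, comp, &clen, wmem);
	if (!ret)
		ret = sw842_decompress(comp, clen, out, &dlen);
out:
	kfree(comp);
	kfree(wmem);
	return ret;
}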
diff --git a/kernel/include/linux/swait.h b/kernel/include/linux/swait.h
new file mode 100644
index 000000000..83f004a72
--- /dev/null
+++ b/kernel/include/linux/swait.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_SWAIT_H
+#define _LINUX_SWAIT_H
+
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <asm/current.h>
+
+/*
+ * Simple wait queues
+ *
+ * While these are very similar to the other/complex wait queues (wait.h) the
+ * most important difference is that the simple waitqueue allows for
+ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
+ * times.
+ *
+ * In order to make this so, we had to drop a fair number of features of the
+ * other waitqueue code; notably:
+ *
+ * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
+ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
+ * sleeper state.
+ *
+ * - the exclusive mode; because this requires preserving the list order
+ * and this is hard.
+ *
+ * - custom wake functions; because you cannot give any guarantees about
+ * random code.
+ *
+ * As a side effect of this, the data structures are slimmer.
+ *
+ * One would recommend using this wait queue where possible.
+ */
+
+struct task_struct;
+
+struct swait_queue_head {
+ raw_spinlock_t lock;
+ struct list_head task_list;
+};
+
+struct swait_queue {
+ struct task_struct *task;
+ struct list_head task_list;
+};
+
+#define __SWAITQUEUE_INITIALIZER(name) { \
+ .task = current, \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+}
+
+#define DECLARE_SWAITQUEUE(name) \
+ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
+
+#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+}
+
+#define DECLARE_SWAIT_QUEUE_HEAD(name) \
+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
+
+extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
+ struct lock_class_key *key);
+
+#define init_swait_queue_head(q) \
+ do { \
+ static struct lock_class_key __key; \
+ __init_swait_queue_head((q), #q, &__key); \
+ } while (0)
+
+#ifdef CONFIG_LOCKDEP
+# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
+ ({ init_swait_queue_head(&name); name; })
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+#else
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
+ DECLARE_SWAIT_QUEUE_HEAD(name)
+#endif
+
+static inline int swait_active(struct swait_queue_head *q)
+{
+ return !list_empty(&q->task_list);
+}
+
+extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_all(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_all_locked(struct swait_queue_head *q);
+
+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
+
+extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+#define ___swait_event(wq, condition, state, ret, cmd) \
+({ \
+ struct swait_queue __wait; \
+ long __ret = ret; \
+ \
+ INIT_LIST_HEAD(&__wait.task_list); \
+ for (;;) { \
+ long __int = prepare_to_swait_event(&wq, &__wait, state);\
+ \
+ if (condition) \
+ break; \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ break; \
+ } \
+ \
+ cmd; \
+ } \
+ finish_swait(&wq, &__wait); \
+ __ret; \
+})
+
+#define __swait_event(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
+ schedule())
+
+#define swait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event(wq, condition); \
+} while (0)
+
+#define __swait_event_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define swait_event_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
+#define __swait_event_interruptible(wq, condition) \
+ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
+ schedule())
+
+#define swait_event_interruptible(wq, condition) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __swait_event_interruptible(wq, condition); \
+ __ret; \
+})
+
+#define __swait_event_interruptible_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define swait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_interruptible_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
+#endif /* _LINUX_SWAIT_H */
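A minimal usage sketch for the API above — the queue, flag, and function names are illustrative, not part of the header:

    static DECLARE_SWAIT_QUEUE_HEAD(my_swait);  /* illustrative queue */
    static bool my_cond;                        /* illustrative condition */

    /* Sleeper: block interruptibly until my_cond becomes true. */
    static int my_wait_for_cond(void)
    {
            return swait_event_interruptible(my_swait, READ_ONCE(my_cond));
    }

    /* Waker: set the condition, then wake; all wakeups are TASK_NORMAL. */
    static void my_set_cond(void)
    {
            WRITE_ONCE(my_cond, true);
            swake_up(&my_swait);
    }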
diff --git a/kernel/include/linux/swap.h b/kernel/include/linux/swap.h
index 4c07c12d2..da646f2eb 100644
--- a/kernel/include/linux/swap.h
+++ b/kernel/include/linux/swap.h
@@ -354,7 +354,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
-extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
+static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+{
+ /* root ? */
+ if (mem_cgroup_disabled() || !memcg->css.parent)
+ return vm_swappiness;
+
+ return memcg->swappiness;
+}
+
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
@@ -376,11 +384,10 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio, int err);
+extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- void (*end_write_func)(struct bio *, int));
+ bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);
-extern void end_swap_bio_read(struct bio *bio, int err);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
@@ -402,6 +409,9 @@ extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
+extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr,
+ bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
@@ -435,6 +445,7 @@ extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
+extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
@@ -526,6 +537,11 @@ static inline int page_swapcount(struct page *page)
return 0;
}
+static inline int swp_swapcount(swp_entry_t entry)
+{
+ return 0;
+}
+
#define reuse_swap_page(page) (page_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
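The end_swap_bio_write()/__swap_writepage() change above drops the error argument; in this kernel generation the completion status travels in the bio itself (the bi_error field — an assumption here, matching the new bio_end_io_t shape). A hedged sketch of a callback with the new signature:

    static void my_end_write(struct bio *bio)  /* matches bio_end_io_t */
    {
            struct page *page = bio->bi_io_vec[0].bv_page;

            if (bio->bi_error)          /* error now carried in the bio */
                    SetPageError(page);
            end_page_writeback(page);
            bio_put(bio);
    }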
diff --git a/kernel/include/linux/swapops.h b/kernel/include/linux/swapops.h
index cedf3d3c3..5c3a5f3e7 100644
--- a/kernel/include/linux/swapops.h
+++ b/kernel/include/linux/swapops.h
@@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry)
#endif
#ifdef CONFIG_MEMORY_FAILURE
+
+extern atomic_long_t num_poisoned_pages __read_mostly;
+
/*
* Support for hardware poisoned pages
*/
@@ -177,6 +180,31 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
{
return swp_type(entry) == SWP_HWPOISON;
}
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+ return TestSetPageHWPoison(page);
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+ atomic_long_inc(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_dec(void)
+{
+ atomic_long_dec(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_add(long num)
+{
+ atomic_long_add(num, &num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_sub(long num)
+{
+ atomic_long_sub(num, &num_poisoned_pages);
+}
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -188,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
{
return 0;
}
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+ return false;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+}
#endif
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
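The wrappers above hide the atomic counter behind named operations; e.g. a memory-failure path that poisons a compound page can account all of its pages in one step (a sketch — the surrounding logic is illustrative):

    static void my_account_huge_poison(struct page *head, int nr_pages)
    {
            /* TestSet returns the old value: only the first poisoner accounts */
            if (!test_set_page_hwpoison(head))
                    num_poisoned_pages_add(nr_pages);
    }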
diff --git a/kernel/include/linux/syscalls.h b/kernel/include/linux/syscalls.h
index 76d1e38aa..c2b66a277 100644
--- a/kernel/include/linux/syscalls.h
+++ b/kernel/include/linux/syscalls.h
@@ -111,14 +111,14 @@ union bpf_attr;
#define __SC_STR_ADECL(t, a) #a
#define __SC_STR_TDECL(t, a) #t
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
extern struct trace_event_functions enter_syscall_print_funcs;
extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_enter_##sname = { \
.class = &event_class_syscall_enter, \
{ \
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_exit_##sname = { \
.class = &event_class_syscall_exit, \
{ \
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_exit_##sname = &event_exit_##sname;
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
asmlinkage long sys_lchown(const char __user *filename,
uid_t user, gid_t group);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
asmlinkage long sys_lchown16(const char __user *filename,
@@ -810,6 +810,7 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
asmlinkage long sys_eventfd(unsigned int count);
asmlinkage long sys_eventfd2(unsigned int count, int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
+asmlinkage long sys_userfaultfd(int flags);
asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
@@ -827,15 +828,15 @@ asmlinkage long sys_syncfs(int fd);
asmlinkage long sys_fork(void);
asmlinkage long sys_vfork(void);
#ifdef CONFIG_CLONE_BACKWARDS
-asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
+asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
int __user *);
#else
#ifdef CONFIG_CLONE_BACKWARDS3
asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
- int __user *, int);
+ int __user *, unsigned long);
#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
- int __user *, int);
+ int __user *, unsigned long);
#endif
#endif
@@ -884,4 +885,8 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
+asmlinkage long sys_membarrier(int cmd, int flags);
+
+asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
+
#endif
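Until libc grows wrappers, the two new entry points are reached via syscall(2) from userspace. A hedged example, assuming kernel headers that define __NR_membarrier, __NR_mlock2 and the MEMBARRIER_CMD_* constants:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/membarrier.h>   /* MEMBARRIER_CMD_* */

    int main(void)
    {
            /* Query the supported membarrier commands (a bitmask). */
            long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
            static char buf[4096];

            /* Lock a buffer; flags = 0 behaves like plain mlock(). */
            syscall(__NR_mlock2, buf, sizeof(buf), 0);
            return cmds < 0;
    }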
diff --git a/kernel/include/linux/sysfs.h b/kernel/include/linux/sysfs.h
index 9f6575831..c6f0f0d0e 100644
--- a/kernel/include/linux/sysfs.h
+++ b/kernel/include/linux/sysfs.h
@@ -64,10 +64,18 @@ do { \
* a new subdirectory with this name.
* @is_visible: Optional: Function to return permissions associated with an
* attribute of the group. Will be called repeatedly for each
- * attribute in the group. Only read/write permissions as well as
- * SYSFS_PREALLOC are accepted. Must return 0 if an attribute is
- * not visible. The returned value will replace static permissions
- * defined in struct attribute or struct bin_attribute.
+ * non-binary attribute in the group. Only read/write
+ * permissions as well as SYSFS_PREALLOC are accepted. Must
+ * return 0 if an attribute is not visible. The returned value
+ * will replace static permissions defined in struct attribute.
+ * @is_bin_visible:
+ * Optional: Function to return permissions associated with a
+ * binary attribute of the group. Will be called repeatedly
+ * for each binary attribute in the group. Only read/write
+ * permissions as well as SYSFS_PREALLOC are accepted. Must
+ * return 0 if a binary attribute is not visible. The returned
+ * value will replace static permissions defined in
+ * struct bin_attribute.
* @attrs: Pointer to NULL terminated list of attributes.
* @bin_attrs: Pointer to NULL terminated list of binary attributes.
* Either attrs or bin_attrs or both must be provided.
@@ -76,6 +84,8 @@ struct attribute_group {
const char *name;
umode_t (*is_visible)(struct kobject *,
struct attribute *, int);
+ umode_t (*is_bin_visible)(struct kobject *,
+ struct bin_attribute *, int);
struct attribute **attrs;
struct bin_attribute **bin_attrs;
};
@@ -268,6 +278,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
+int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
@@ -451,6 +464,14 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj,
{
}
+static inline int __compat_only_sysfs_link_entry_to_kobj(
+ struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name)
+{
+ return 0;
+}
+
static inline void sysfs_notify(struct kobject *kobj, const char *dir,
const char *attr)
{
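With @is_bin_visible a driver can veto individual binary attributes per device, exactly as @is_visible does for plain ones. A brief sketch; the blob attribute is hypothetical:

    static struct bin_attribute my_blob = {
            .attr = { .name = "blob", .mode = 0444 },  /* hypothetical */
            .size = 16,
    };

    static struct bin_attribute *my_bin_attrs[] = { &my_blob, NULL };

    static umode_t my_bin_attr_visible(struct kobject *kobj,
                                       struct bin_attribute *attr, int n)
    {
            /* Return 0 to hide the entry, else the (possibly adjusted) mode. */
            return attr->attr.mode;
    }

    static const struct attribute_group my_group = {
            .bin_attrs      = my_bin_attrs,
            .is_bin_visible = my_bin_attr_visible,
    };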
diff --git a/kernel/include/linux/syslog.h b/kernel/include/linux/syslog.h
index 4b7b875a7..c3a7f0cc3 100644
--- a/kernel/include/linux/syslog.h
+++ b/kernel/include/linux/syslog.h
@@ -47,12 +47,12 @@
#define SYSLOG_FROM_READER 0
#define SYSLOG_FROM_PROC 1
-int do_syslog(int type, char __user *buf, int count, bool from_file);
+int do_syslog(int type, char __user *buf, int count, int source);
#ifdef CONFIG_PRINTK
-int check_syslog_permissions(int type, bool from_file);
+int check_syslog_permissions(int type, int source);
#else
-static inline int check_syslog_permissions(int type, bool from_file)
+static inline int check_syslog_permissions(int type, int source)
{
return 0;
}
diff --git a/kernel/include/linux/t10-pi.h b/kernel/include/linux/t10-pi.h
index 6a8b99426..dd8de82cf 100644
--- a/kernel/include/linux/t10-pi.h
+++ b/kernel/include/linux/t10-pi.h
@@ -14,9 +14,9 @@ struct t10_pi_tuple {
};
-extern struct blk_integrity t10_pi_type1_crc;
-extern struct blk_integrity t10_pi_type1_ip;
-extern struct blk_integrity t10_pi_type3_crc;
-extern struct blk_integrity t10_pi_type3_ip;
+extern struct blk_integrity_profile t10_pi_type1_crc;
+extern struct blk_integrity_profile t10_pi_type1_ip;
+extern struct blk_integrity_profile t10_pi_type3_crc;
+extern struct blk_integrity_profile t10_pi_type3_ip;
#endif
diff --git a/kernel/include/linux/tcp.h b/kernel/include/linux/tcp.h
index e8bbf4036..b386361ba 100644
--- a/kernel/include/linux/tcp.h
+++ b/kernel/include/linux/tcp.h
@@ -112,10 +112,11 @@ struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
+ struct skb_mstamp snt_synack; /* first SYNACK sent time */
bool tfo_listener;
+ u32 txhash;
u32 rcv_isn;
u32 snt_isn;
- u32 snt_synack; /* synack sent time */
u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
@@ -149,11 +150,16 @@ struct tcp_sock {
* sum(delta(rcv_nxt)), or how many bytes
* were acked.
*/
+ u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
u32 snd_nxt; /* Next sequence we send */
-
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
@@ -188,6 +194,12 @@ struct tcp_sock {
u32 window_clamp; /* Maximal window to advertise */
u32 rcv_ssthresh; /* Current window clamp */
+ /* Information of the most recently (s)acked skb */
+ struct tcp_rack {
+ struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u8 advanced; /* mstamp advanced since last lost marking */
+ u8 reord; /* reordering detected */
+ } rack;
u16 advmss; /* Advertised MSS */
u8 unused;
u8 nonagle : 4,/* Disable Nagle algorithm? */
@@ -201,6 +213,7 @@ struct tcp_sock {
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
@@ -210,6 +223,9 @@ struct tcp_sock {
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
+ struct rtt_meas {
+ u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
+ } rtt_min[3];
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -273,8 +289,6 @@ struct tcp_sock {
int lost_cnt_hint;
u32 retransmit_high; /* L-bits may be on up to this seqno */
- u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
-
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
@@ -328,6 +342,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock *fastopen_rsk;
+ u32 *saved_syn;
};
enum tsq_flags {
@@ -348,8 +363,8 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk)
struct tcp_timewait_sock {
struct inet_timewait_sock tw_sk;
- u32 tw_rcv_nxt;
- u32 tw_snd_nxt;
+#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
+#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
u32 tw_rcv_wnd;
u32 tw_ts_offset;
u32 tw_ts_recent;
@@ -374,25 +389,25 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
tcp_sk(sk)->fastopen_rsk != NULL);
}
-extern void tcp_sock_destruct(struct sock *sk);
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
+{
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
+
+ queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
+}
+
+static inline void tcp_move_syn(struct tcp_sock *tp,
+ struct request_sock *req)
+{
+ tp->saved_syn = req->saved_syn;
+ req->saved_syn = NULL;
+}
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
- struct request_sock_queue *queue =
- &inet_csk(sk)->icsk_accept_queue;
-
- if (queue->fastopenq == NULL) {
- queue->fastopenq = kzalloc(
- sizeof(struct fastopen_queue),
- sk->sk_allocation);
- if (queue->fastopenq == NULL)
- return -ENOMEM;
-
- sk->sk_destruct = tcp_sock_destruct;
- spin_lock_init(&queue->fastopenq->lock);
- }
- queue->fastopenq->max_qlen = backlog;
- return 0;
+ kfree(tp->saved_syn);
+ tp->saved_syn = NULL;
}
#endif /* _LINUX_TCP_H */
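tcp_move_syn() hands the saved headers from the request sock to the full socket; whoever reads them is expected to call tcp_saved_syn_free(). A sketch assuming a length-prefixed layout (first u32 is the header length) — that layout is an assumption here, not shown in this hunk:

    static void my_consume_saved_syn(struct tcp_sock *tp)
    {
            if (tp->saved_syn) {
                    /* assumed layout: saved_syn[0] = length, data follows */
                    pr_info("saved SYN: %u header bytes\n", tp->saved_syn[0]);
                    tcp_saved_syn_free(tp); /* kfree()s and clears */
            }
    }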
diff --git a/kernel/include/linux/thermal.h b/kernel/include/linux/thermal.h
index 5eac31649..e13a1ace5 100644
--- a/kernel/include/linux/thermal.h
+++ b/kernel/include/linux/thermal.h
@@ -40,10 +40,18 @@
/* No upper/lower limit requirement */
#define THERMAL_NO_LIMIT ((u32)~0)
+/* Default weight of a bound cooling device */
+#define THERMAL_WEIGHT_DEFAULT 0
+
+/* Use a value below absolute zero (< 0 K) to mark an invalid/uninitialized temperature */
+#define THERMAL_TEMP_INVALID -274000
+
/* Unit conversion macros */
-#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
- ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
-#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
+#define DECI_KELVIN_TO_CELSIUS(t) ({ \
+ long _t = (t); \
+ ((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10); \
+})
+#define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732)
#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
@@ -56,10 +64,13 @@
#define DEFAULT_THERMAL_GOVERNOR "fair_share"
#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
#define DEFAULT_THERMAL_GOVERNOR "user_space"
+#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
+#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
#endif
struct thermal_zone_device;
struct thermal_cooling_device;
+struct thermal_instance;
enum thermal_device_mode {
THERMAL_DEVICE_DISABLED = 0,
@@ -86,23 +97,19 @@ struct thermal_zone_device_ops {
struct thermal_cooling_device *);
int (*unbind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
- int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_temp) (struct thermal_zone_device *, int *);
int (*get_mode) (struct thermal_zone_device *,
enum thermal_device_mode *);
int (*set_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
int (*get_trip_type) (struct thermal_zone_device *, int,
enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_temp) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
- int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
+ int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
+ int (*set_trip_temp) (struct thermal_zone_device *, int, int);
+ int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
+ int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
+ int (*get_crit_temp) (struct thermal_zone_device *, int *);
+ int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
int (*notify) (struct thermal_zone_device *, int,
@@ -113,6 +120,12 @@ struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+ int (*get_requested_power)(struct thermal_cooling_device *,
+ struct thermal_zone_device *, u32 *);
+ int (*state2power)(struct thermal_cooling_device *,
+ struct thermal_zone_device *, unsigned long, u32 *);
+ int (*power2state)(struct thermal_cooling_device *,
+ struct thermal_zone_device *, u32, unsigned long *);
};
struct thermal_cooling_device {
@@ -144,8 +157,7 @@ struct thermal_attr {
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
* @passive_delay: number of milliseconds to wait between polls when
- * performing passive cooling. Currenty only used by the
- * step-wise governor
+ * performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
* checking whether trip points have been crossed (0 for
* interrupt driven systems)
@@ -155,13 +167,14 @@ struct thermal_attr {
* @last_temperature: previous temperature read
* @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
* @passive: 1 if you've crossed a passive trip point, 0 otherwise.
- * Currenty only used by the step-wise governor.
* @forced_passive: If > 0, temperature at which to switch on all ACPI
* processor cooling devices. Currently only used by the
* step-wise governor.
+ * @need_update: if set to 1, thermal_zone_device_update() needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
* @governor: pointer to the governor for this thermal zone
+ * @governor_data: private pointer for governor data
* @thermal_instances: list of &struct thermal_instance of this thermal zone
* @idr: &struct idr to generate unique id for this zone's cooling
* devices
@@ -185,9 +198,11 @@ struct thermal_zone_device {
int emul_temperature;
int passive;
unsigned int forced_passive;
+ atomic_t need_update;
struct thermal_zone_device_ops *ops;
- const struct thermal_zone_params *tzp;
+ struct thermal_zone_params *tzp;
struct thermal_governor *governor;
+ void *governor_data;
struct list_head thermal_instances;
struct idr idr;
struct mutex lock;
@@ -198,12 +213,19 @@ struct thermal_zone_device {
/**
* struct thermal_governor - structure that holds thermal governor information
* @name: name of the governor
+ * @bind_to_tz: callback called when binding to a thermal zone. If it
+ * returns 0, the governor is bound to the thermal zone,
+ * otherwise it fails.
+ * @unbind_from_tz: callback called when a governor is unbound from a
+ * thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
char name[THERMAL_NAME_LENGTH];
+ int (*bind_to_tz)(struct thermal_zone_device *tz);
+ void (*unbind_from_tz)(struct thermal_zone_device *tz);
int (*throttle)(struct thermal_zone_device *tz, int trip);
struct list_head governor_list;
};
@@ -214,9 +236,12 @@ struct thermal_bind_params {
/*
* This is a measure of 'how effectively these devices can
- * cool 'this' thermal zone. The shall be determined by platform
- * characterization. This is on a 'percentage' scale.
- * See Documentation/thermal/sysfs-api.txt for more information.
+ * cool 'this' thermal zone. It shall be determined by
+ * platform characterization. This value is relative to the
+ * rest of the weights so a cooling device whose weight is
+ * double that of another cooling device is twice as
+ * effective. See Documentation/thermal/sysfs-api.txt for more
+ * information.
*/
int weight;
@@ -253,6 +278,44 @@ struct thermal_zone_params {
int num_tbps; /* Number of tbp entries */
struct thermal_bind_params *tbp;
+
+ /*
+ * Sustainable power (heat) that this thermal zone can dissipate in
+ * mW
+ */
+ u32 sustainable_power;
+
+ /*
+ * Proportional parameter of the PID controller when
+ * overshooting (i.e., when temperature is below the target)
+ */
+ s32 k_po;
+
+ /*
+ * Proportional parameter of the PID controller when
+ * undershooting
+ */
+ s32 k_pu;
+
+ /* Integral parameter of the PID controller */
+ s32 k_i;
+
+ /* Derivative parameter of the PID controller */
+ s32 k_d;
+
+ /* threshold below which the error is no longer accumulated */
+ s32 integral_cutoff;
+
+ /*
+ * @slope: slope of a linear temperature adjustment curve.
+ * Used by thermal zone drivers.
+ */
+ int slope;
+ /*
+ * @offset: offset of a linear temperature adjustment curve.
+ * Used by thermal zone drivers (default 0).
+ */
+ int offset;
};
struct thermal_genl_event {
@@ -272,9 +335,9 @@ struct thermal_genl_event {
* temperature.
*/
struct thermal_zone_of_device_ops {
- int (*get_temp)(void *, long *);
+ int (*get_temp)(void *, int *);
int (*get_trend)(void *, long *);
- int (*set_emul_temp)(void *, unsigned long);
+ int (*set_emul_temp)(void *, int);
};
/**
@@ -304,7 +367,7 @@ static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
const struct thermal_zone_of_device_ops *ops)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline
@@ -316,14 +379,27 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
#endif
#if IS_ENABLED(CONFIG_THERMAL)
+static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
+{
+ return cdev->ops->get_requested_power && cdev->ops->state2power &&
+ cdev->ops->power2state;
+}
+
+int power_actor_get_max_power(struct thermal_cooling_device *,
+ struct thermal_zone_device *tz, u32 *max_power);
+int power_actor_get_min_power(struct thermal_cooling_device *,
+ struct thermal_zone_device *tz, u32 *min_power);
+int power_actor_set_power(struct thermal_cooling_device *,
+ struct thermal_instance *, u32);
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
+ struct thermal_zone_params *, int, int);
void thermal_zone_device_unregister(struct thermal_zone_device *);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *,
- unsigned long, unsigned long);
+ unsigned long, unsigned long,
+ unsigned int);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *);
@@ -335,7 +411,7 @@ thermal_of_cooling_device_register(struct device_node *np, char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
-int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int get_tz_trend(struct thermal_zone_device *, int);
struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -343,6 +419,18 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
#else
+static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
+{ return false; }
+static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 *max_power)
+{ return 0; }
+static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ u32 *min_power)
+{ return -ENODEV; }
+static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
+ struct thermal_instance *tz, u32 power)
+{ return 0; }
static inline struct thermal_zone_device *thermal_zone_device_register(
const char *type, int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
@@ -355,7 +443,8 @@ static inline void thermal_zone_device_unregister(
static inline int thermal_zone_bind_cooling_device(
struct thermal_zone_device *tz, int trip,
struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower)
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
{ return -ENODEV; }
static inline int thermal_zone_unbind_cooling_device(
struct thermal_zone_device *tz, int trip,
@@ -378,7 +467,7 @@ static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
const char *name)
{ return ERR_PTR(-ENODEV); }
static inline int thermal_zone_get_temp(
- struct thermal_zone_device *tz, unsigned long *temp)
+ struct thermal_zone_device *tz, int *temp)
{ return -ENODEV; }
static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
{ return -ENODEV; }
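A cooling device only qualifies as a power actor (see cdev_is_power_actor() above) when it implements all three power callbacks. A hedged skeleton with a made-up linear power curve:

    static int my_state2power(struct thermal_cooling_device *cdev,
                              struct thermal_zone_device *tz,
                              unsigned long state, u32 *power)
    {
            *power = 1000 - 100 * state;    /* illustrative mW curve */
            return 0;
    }

    static int my_power2state(struct thermal_cooling_device *cdev,
                              struct thermal_zone_device *tz,
                              u32 power, unsigned long *state)
    {
            *state = (1000 - power) / 100;  /* inverse of the curve above */
            return 0;
    }

    static int my_get_requested_power(struct thermal_cooling_device *cdev,
                                      struct thermal_zone_device *tz, u32 *power)
    {
            unsigned long state;
            int ret = cdev->ops->get_cur_state(cdev, &state);

            return ret ?: my_state2power(cdev, tz, state, power);
    }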
diff --git a/kernel/include/linux/ti_wilink_st.h b/kernel/include/linux/ti_wilink_st.h
index c78dcfeaf..0a0d56834 100644
--- a/kernel/include/linux/ti_wilink_st.h
+++ b/kernel/include/linux/ti_wilink_st.h
@@ -86,7 +86,6 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
-extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
@@ -159,6 +158,7 @@ struct st_data_s {
unsigned long ll_state;
void *kim_data;
struct tty_struct *tty;
+ struct work_struct work_write_wakeup;
};
/*
diff --git a/kernel/include/linux/tick.h b/kernel/include/linux/tick.h
index f8492da57..e312219ff 100644
--- a/kernel/include/linux/tick.h
+++ b/kernel/include/linux/tick.h
@@ -13,8 +13,6 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
-extern void tick_freeze(void);
-extern void tick_unfreeze(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
@@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
-static inline void tick_freeze(void) { }
-static inline void tick_unfreeze(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+#else
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+#endif
+
#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
@@ -63,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
-static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ return 0;
+}
#endif
static inline void tick_broadcast_enable(void)
@@ -134,21 +141,44 @@ static inline bool tick_nohz_full_cpu(int cpu)
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
-extern void __tick_nohz_full_check(void);
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
+{
+ if (tick_nohz_full_enabled())
+ cpumask_or(mask, mask, tick_nohz_full_mask);
+}
+
+static inline int housekeeping_any_cpu(void)
+{
+ return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+}
+
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
-extern void __tick_nohz_task_switch(struct task_struct *tsk);
+extern void __tick_nohz_task_switch(void);
#else
+static inline int housekeeping_any_cpu(void)
+{
+ return smp_processor_id();
+}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
-static inline void __tick_nohz_full_check(void) { }
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
-static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
+static inline void __tick_nohz_task_switch(void) { }
#endif
+static inline const struct cpumask *housekeeping_cpumask(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ if (tick_nohz_full_enabled())
+ return housekeeping_mask;
+#endif
+ return cpu_possible_mask;
+}
+
static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
@@ -167,16 +197,10 @@ static inline void housekeeping_affine(struct task_struct *t)
#endif
}
-static inline void tick_nohz_full_check(void)
-{
- if (tick_nohz_full_enabled())
- __tick_nohz_full_check();
-}
-
-static inline void tick_nohz_task_switch(struct task_struct *tsk)
+static inline void tick_nohz_task_switch(void)
{
if (tick_nohz_full_enabled())
- __tick_nohz_task_switch(tsk);
+ __tick_nohz_task_switch();
}
#endif
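housekeeping_cpumask() falls back to cpu_possible_mask when NO_HZ_FULL is off, so callers need no ifdefs; e.g. confining a background kthread (a sketch — set_cpus_allowed_ptr() is the standard affinity setter, assumed available here):

    static void my_confine_kthread(struct task_struct *t)
    {
            /* Keep the thread off nohz_full CPUs; harmless otherwise. */
            set_cpus_allowed_ptr(t, housekeeping_cpumask());
    }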
diff --git a/kernel/include/linux/time64.h b/kernel/include/linux/time64.h
index a3831478d..367d5af89 100644
--- a/kernel/include/linux/time64.h
+++ b/kernel/include/linux/time64.h
@@ -2,6 +2,7 @@
#define _LINUX_TIME64_H
#include <uapi/linux/time.h>
+#include <linux/math64.h>
typedef __s64 time64_t;
@@ -11,11 +12,18 @@ typedef __s64 time64_t;
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
+# define itimerspec64 itimerspec
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
+
+struct itimerspec64 {
+ struct timespec64 it_interval;
+ struct timespec64 it_value;
+};
+
#endif
/* Parameters used to convert the timespec values: */
@@ -28,6 +36,7 @@ struct timespec64 {
#define FSEC_PER_SEC 1000000000000000LL
/* Located here for timespec[64]_valid_strict */
+#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
@@ -43,6 +52,16 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
return ts;
}
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+ return *its64;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+ return *its;
+}
+
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
@@ -75,6 +94,24 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
return ret;
}
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+ struct itimerspec ret;
+
+ ret.it_interval = timespec64_to_timespec(its64->it_interval);
+ ret.it_value = timespec64_to_timespec(its64->it_value);
+ return ret;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+ struct itimerspec64 ret;
+
+ ret.it_interval = timespec_to_timespec64(its->it_interval);
+ ret.it_value = timespec_to_timespec64(its->it_value);
+ return ret;
+}
+
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
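The itimerspec64 conversions mirror the timespec64 ones and compile to identity operations on 64-bit. A small sketch of the intended round-trip at a syscall boundary:

    static void my_handle_timer(struct itimerspec *its)
    {
            struct itimerspec64 its64 = itimerspec_to_itimerspec64(its);

            /* ... operate on 2038-safe 64-bit time ... */
            *its = itimerspec64_to_itimerspec(&its64);
    }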
diff --git a/kernel/include/linux/timekeeper_internal.h b/kernel/include/linux/timekeeper_internal.h
index fb8696385..25247220b 100644
--- a/kernel/include/linux/timekeeper_internal.h
+++ b/kernel/include/linux/timekeeper_internal.h
@@ -49,6 +49,8 @@ struct tk_read_base {
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_time: Monotonic raw base time in timespec64 format
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
@@ -60,6 +62,9 @@ struct tk_read_base {
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds.
+ * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
+ * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
+ * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
* we need to add to xtime (or xtime corrected for sub jiffie times)
@@ -85,6 +90,8 @@ struct timekeeper {
ktime_t offs_boot;
ktime_t offs_tai;
s32 tai_offset;
+ unsigned int clock_was_set_seq;
+ ktime_t next_leap_ktime;
struct timespec64 raw_time;
/* The following members are for timekeeping internal use */
@@ -104,6 +111,18 @@ struct timekeeper {
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+ long last_warning;
+ /*
+ * These simple flag variables are managed
+ * without locks, which is racy, but they are
+ * ok since we don't really care about being
+ * super precise about how many events were
+ * seen, just that a problem was observed.
+ */
+ int underflow_seen;
+ int overflow_seen;
+#endif
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/kernel/include/linux/timekeeping.h b/kernel/include/linux/timekeeping.h
index 99176af21..ec89d8463 100644
--- a/kernel/include/linux/timekeeping.h
+++ b/kernel/include/linux/timekeeping.h
@@ -18,10 +18,17 @@ extern int do_sys_settimeofday(const struct timespec *tv,
* Kernel time accessors
*/
unsigned long get_seconds(void);
-struct timespec current_kernel_time(void);
+struct timespec64 current_kernel_time64(void);
/* does not take xtime_lock */
struct timespec __current_kernel_time(void);
+static inline struct timespec current_kernel_time(void)
+{
+ struct timespec64 now = current_kernel_time64();
+
+ return timespec64_to_timespec(now);
+}
+
/*
* timespec based interfaces
*/
@@ -145,7 +152,6 @@ static inline void getboottime(struct timespec *ts)
}
#endif
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
/*
@@ -163,6 +169,7 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
+extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
@@ -256,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
/*
* PPS accessor
*/
-extern void getnstime_raw_and_real(struct timespec *ts_raw,
- struct timespec *ts_real);
+extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
+ struct timespec64 *ts_real);
/*
* Persistent clock related interfaces
@@ -266,7 +273,6 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock(struct timespec *ts);
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock(struct timespec *ts);
extern void read_boot_clock64(struct timespec64 *ts);
extern int update_persistent_clock(struct timespec now);
extern int update_persistent_clock64(struct timespec64 now);
diff --git a/kernel/include/linux/timer.h b/kernel/include/linux/timer.h
index 5fcd72c57..299d2b785 100644
--- a/kernel/include/linux/timer.h
+++ b/kernel/include/linux/timer.h
@@ -14,27 +14,23 @@ struct timer_list {
* All fields that change during normal runtime grouped to the
* same cacheline
*/
- struct list_head entry;
- unsigned long expires;
- struct tvec_base *base;
-
- void (*function)(unsigned long);
- unsigned long data;
-
- int slack;
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(unsigned long);
+ unsigned long data;
+ u32 flags;
+ int slack;
#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
+ struct lockdep_map lockdep_map;
#endif
};
-extern struct tvec_base boot_tvec_bases;
-
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
#endif
/*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
* A deferrable timer will work normally when the system is busy, but
* will not cause a CPU to come out of idle just to service it; instead,
* the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_DEFERRABLE 0x1LU
-#define TIMER_IRQSAFE 0x2LU
-
-#define TIMER_FLAG_MASK 0x3LU
+#define TIMER_CPUMASK 0x0007FFFF
+#define TIMER_MIGRATING 0x00080000
+#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_IRQSAFE 0x00200000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .prev = TIMER_ENTRY_STATIC }, \
+ .entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
- .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+ .flags = (_flags), \
.slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.next != NULL;
+ return timer->entry.pprev != NULL;
}
extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -188,26 +182,16 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
/*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
-/*
* Timer-statistics info:
*/
#ifdef CONFIG_TIMER_STATS
extern int timer_stats_active;
-#define TIMER_STATS_FLAG_DEFERRABLE 0x1
-
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
+ void *timerf, char *comm, u32 flags);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
void *addr);
@@ -254,6 +238,15 @@ extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
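With the tvec_base pointer gone, the owning CPU and the mode bits live in @flags, per the masks defined above (sketch):

    static unsigned int my_timer_cpu(const struct timer_list *timer)
    {
            return timer->flags & TIMER_CPUMASK;    /* low bits: CPU number */
    }

    static bool my_timer_is_deferrable(const struct timer_list *timer)
    {
            return timer->flags & TIMER_DEFERRABLE;
    }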
diff --git a/kernel/include/linux/timerqueue.h b/kernel/include/linux/timerqueue.h
index a520fd70a..7eec17ad7 100644
--- a/kernel/include/linux/timerqueue.h
+++ b/kernel/include/linux/timerqueue.h
@@ -16,10 +16,10 @@ struct timerqueue_head {
};
-extern void timerqueue_add(struct timerqueue_head *head,
- struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
- struct timerqueue_node *node);
+extern bool timerqueue_add(struct timerqueue_head *head,
+ struct timerqueue_node *node);
+extern bool timerqueue_del(struct timerqueue_head *head,
+ struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
struct timerqueue_node *node);
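The new bool returns let callers react only when the queue's head changes: timerqueue_add() reporting true means the inserted node became the earliest element (that reading of the return value is an assumption here). A sketch with an illustrative reprogram hook:

    static void my_reprogram_hw(ktime_t expires)
    {
            /* illustrative: push the new earliest deadline to hardware */
    }

    static void my_enqueue(struct timerqueue_head *q, struct timerqueue_node *n)
    {
            if (timerqueue_add(q, n))
                    my_reprogram_hw(n->expires);
    }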
diff --git a/kernel/include/linux/timex.h b/kernel/include/linux/timex.h
index 9d3f1a5b6..39c25dbeb 100644
--- a/kernel/include/linux/timex.h
+++ b/kernel/include/linux/timex.h
@@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
extern int do_adjtimex(struct timex *);
-extern void hardpps(const struct timespec *, const struct timespec *);
+extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
void ntp_notify_cmos_timer(void);
diff --git a/kernel/include/linux/topology.h b/kernel/include/linux/topology.h
index 909b6e43b..73ddad1e0 100644
--- a/kernel/include/linux/topology.h
+++ b/kernel/include/linux/topology.h
@@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
-#ifndef topology_thread_cpumask
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#ifndef topology_sibling_cpumask
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
@@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu)
#ifdef CONFIG_SCHED_SMT
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
- return topology_thread_cpumask(cpu);
+ return topology_sibling_cpumask(cpu);
}
#endif
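Only the name changes (thread → sibling); iteration over SMT siblings is unchanged (sketch):

    static void my_print_siblings(int cpu)
    {
            int sibling;

            for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                    pr_info("CPU%d shares a core with CPU%d\n", cpu, sibling);
    }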
diff --git a/kernel/include/linux/tpm.h b/kernel/include/linux/tpm.h
index 8350c538b..706e63eea 100644
--- a/kernel/include/linux/tpm.h
+++ b/kernel/include/linux/tpm.h
@@ -30,6 +30,8 @@
#define TPM_ANY_NUM 0xFFFF
struct tpm_chip;
+struct trusted_key_payload;
+struct trusted_key_options;
struct tpm_class_ops {
const u8 req_complete_mask;
@@ -46,11 +48,22 @@ struct tpm_class_ops {
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+extern int tpm_is_tpm2(u32 chip_num);
extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
+extern int tpm_seal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+extern int tpm_unseal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
#else
+static inline int tpm_is_tpm2(u32 chip_num)
+{
+ return -ENODEV;
+}
static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
return -ENODEV;
}
@@ -63,5 +76,18 @@ static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
return -ENODEV;
}
+
+static inline int tpm_seal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ return -ENODEV;
+}
+static inline int tpm_unseal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ return -ENODEV;
+}
#endif
#endif
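tpm_is_tpm2() lets key-sealing callers distinguish chip families before building commands; a hedged sketch using TPM_ANY_NUM from above (the 1/0/negative return convention is an assumption):

    static int my_seal(struct trusted_key_payload *payload,
                       struct trusted_key_options *options)
    {
            int rc = tpm_is_tpm2(TPM_ANY_NUM);

            if (rc < 0)
                    return rc;      /* no TPM available */

            /* rc > 0: TPM 2.0; rc == 0: TPM 1.x — same sealing entry point */
            return tpm_seal_trusted(TPM_ANY_NUM, payload, options);
    }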
diff --git a/kernel/include/linux/ftrace_event.h b/kernel/include/linux/trace_events.h
index 2ef42aa7e..a591f414d 100644
--- a/kernel/include/linux/ftrace_event.h
+++ b/kernel/include/linux/trace_events.h
@@ -1,6 +1,6 @@
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
@@ -25,35 +25,35 @@ struct trace_print_flags_u64 {
const char *name;
};
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
- unsigned long flags,
- const struct trace_print_flags *flag_array);
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
- const struct trace_print_flags *symbol_array);
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
- unsigned long long val,
- const struct trace_print_flags_u64
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
*symbol_array);
#endif
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
- unsigned int bitmask_size);
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+ unsigned int bitmask_size);
-const char *ftrace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+const char *trace_print_hex_seq(struct trace_seq *p,
+ const unsigned char *buf, int len);
-const char *ftrace_print_array_seq(struct trace_seq *p,
+const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
size_t el_size);
struct trace_iterator;
struct trace_event;
-int ftrace_raw_output_prep(struct trace_iterator *iter,
- struct trace_event *event);
+int trace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *event);
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -71,7 +71,7 @@ struct trace_entry {
unsigned char preempt_lazy_count;
};
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
@@ -135,8 +135,8 @@ struct trace_event {
struct trace_event_functions *funcs;
};
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
@@ -160,24 +160,23 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
-struct ftrace_event_file;
+struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
int type, unsigned long len,
unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
@@ -186,7 +185,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
void tracing_record_cmdline(struct task_struct *tsk);
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
struct event_filter;
@@ -203,50 +202,39 @@ enum trace_reg {
#endif
};
-struct ftrace_event_call;
+struct trace_event_call;
-struct ftrace_event_class {
+struct trace_event_class {
const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
#endif
- int (*reg)(struct ftrace_event_call *event,
+ int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
- int (*define_fields)(struct ftrace_event_call *);
- struct list_head *(*get_fields)(struct ftrace_event_call *);
+ int (*define_fields)(struct trace_event_call *);
+ struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
- int (*raw_init)(struct ftrace_event_call *);
+ int (*raw_init)(struct trace_event_call *);
};
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
- char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
struct ring_buffer *buffer;
struct ring_buffer_event *event;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
void *entry;
unsigned long flags;
int pc;
};
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+ struct trace_event_file *trace_file,
unsigned long len);
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
TRACE_EVENT_FL_FILTERED_BIT,
@@ -257,6 +245,7 @@ enum {
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
+ TRACE_EVENT_FL_UPROBE_BIT,
};
/*
@@ -264,13 +253,14 @@ enum {
* FILTERED - The event has a filter attached
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
- * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ * USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
+ * UPROBE - Event is a uprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -281,11 +271,14 @@ enum {
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
+ TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};
-struct ftrace_event_call {
+#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
+
+struct trace_event_call {
struct list_head list;
- struct ftrace_event_class *class;
+ struct trace_event_class *class;
union {
char *name;
/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
@@ -300,7 +293,7 @@ struct ftrace_event_call {
* bit 0: filter_active
* bit 1: allow trace by non root (cap any)
* bit 2: failed to apply filter
- * bit 3: ftrace internal event (do not enable)
+ * bit 3: trace internal event (do not enable)
* bit 4: Event was enabled by module
* bit 5: use call filter rather than file filter
* bit 6: Event is a tracepoint
@@ -312,13 +305,13 @@ struct ftrace_event_call {
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
- int (*perf_perm)(struct ftrace_event_call *,
+ int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
#endif
};
static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : NULL;
@@ -327,21 +320,22 @@ ftrace_event_name(struct ftrace_event_call *call)
}
struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
enum {
- FTRACE_EVENT_FL_ENABLED_BIT,
- FTRACE_EVENT_FL_RECORDED_CMD_BIT,
- FTRACE_EVENT_FL_FILTERED_BIT,
- FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
- FTRACE_EVENT_FL_SOFT_MODE_BIT,
- FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
- FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_ENABLED_BIT,
+ EVENT_FILE_FL_RECORDED_CMD_BIT,
+ EVENT_FILE_FL_FILTERED_BIT,
+ EVENT_FILE_FL_NO_SET_FILTER_BIT,
+ EVENT_FILE_FL_SOFT_MODE_BIT,
+ EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ EVENT_FILE_FL_TRIGGER_MODE_BIT,
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_PID_FILTER_BIT,
};
/*
- * Ftrace event file flags:
+ * Event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
* FILTERED - The event has a filter attached
@@ -351,25 +345,27 @@ enum {
* tracepoint may be enabled)
* TRIGGER_MODE - When set, invoke the triggers associated with the event
* TRIGGER_COND - When set, one or more triggers has an associated filter
+ * PID_FILTER - When set, the event is filtered based on pid
*/
enum {
- FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
- FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
- FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
- FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
- FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
- FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
- FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
- FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+ EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+ EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
+ EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+ EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+ EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+ EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
};
-struct ftrace_event_file {
+struct trace_event_file {
struct list_head list;
- struct ftrace_event_call *event_call;
+ struct trace_event_call *event_call;
struct event_filter *filter;
struct dentry *dir;
struct trace_array *tr;
- struct ftrace_subsystem_dir *system;
+ struct trace_subsystem_dir *system;
struct list_head triggers;
/*
@@ -402,7 +398,7 @@ struct ftrace_event_file {
early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...) \
- static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ static int perf_perm_##name(struct trace_event_call *tp_event, \
struct perf_event *p_event) \
{ \
return ({ expr; }); \
@@ -428,19 +424,21 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt);
+bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+
/**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
*
* If any triggers without filters are attached to this event, they
@@ -449,15 +447,17 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
* otherwise false.
*/
static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
event_triggers_call(file, NULL);
- if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
+ if (eflags & EVENT_FILE_FL_PID_FILTER)
+ return trace_event_ignore_this_pid(file);
}
return false;
}
@@ -476,7 +476,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
* Returns true if the event is discarded, false otherwise.
*/
static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry,
@@ -484,10 +484,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
{
unsigned long eflags = file->flags;
- if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, entry);
- if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+ if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
ring_buffer_discard_commit(buffer, event);
else if (!filter_check_discard(file, entry, buffer, event))
return false;
@@ -509,7 +509,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
* if the event is soft disabled and should be discarded.
*/
static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc)
@@ -517,7 +517,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
enum event_trigger_type tt = ETT_NONE;
if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
if (tt)
event_triggers_post_call(file, tt);
@@ -540,7 +540,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
* trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
*/
static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc,
@@ -549,14 +549,14 @@ event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
enum event_trigger_type tt = ETT_NONE;
if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit_regs(buffer, event,
+ trace_buffer_unlock_commit_regs(file->tr, buffer, event,
irq_flags, pc, regs);
if (tt)
event_triggers_post_call(file, tt);
}
-#ifdef CONFIG_BPF_SYSCALL
+#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
@@ -571,14 +571,16 @@ enum {
FILTER_DYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_COMM,
+ FILTER_CPU,
};
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -627,4 +629,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
}
#endif
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
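To make the rename concrete, here is a minimal sketch of what a generated probe looks like under the new trace_event_* names; probe_my_event and my_event_entry are illustrative stand-ins for what the TRACE_EVENT() machinery actually emits:

struct my_event_entry {		/* hypothetical; normally generated */
	struct trace_entry ent;
	int val;
};

static notrace void probe_my_event(void *data, int val)
{
	struct trace_event_file *trace_file = data;
	struct trace_event_buffer fbuffer;
	struct my_event_entry *entry;

	/* Bail out early if the event is soft-disabled or pid-filtered. */
	if (trace_trigger_soft_disabled(trace_file))
		return;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (!entry)
		return;

	entry->val = val;
	trace_event_buffer_commit(&fbuffer);
}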
diff --git a/kernel/include/linux/tracehook.h b/kernel/include/linux/tracehook.h
index 84d497297..26c152122 100644
--- a/kernel/include/linux/tracehook.h
+++ b/kernel/include/linux/tracehook.h
@@ -50,6 +50,7 @@
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/task_work.h>
+#include <linux/memcontrol.h>
struct linux_binprm;
/*
@@ -188,6 +189,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
smp_mb__after_atomic();
if (unlikely(current->task_works))
task_work_run();
+
+ mem_cgroup_handle_over_high();
}
#endif /* <linux/tracehook.h> */
diff --git a/kernel/include/linux/tracepoint.h b/kernel/include/linux/tracepoint.h
index a5f7f3eca..27e32b2b6 100644
--- a/kernel/include/linux/tracepoint.h
+++ b/kernel/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
* See the file COPYING for more details.
*/
+#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>
@@ -26,6 +28,7 @@ struct notifier_block;
struct tracepoint_func {
void *func;
void *data;
+ int prio;
};
struct tracepoint {
@@ -42,9 +45,14 @@ struct trace_enum_map {
unsigned long enum_value;
};
+#define TRACEPOINT_DEFAULT_PRIO 10
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
+tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
+ int prio);
+extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
@@ -111,7 +119,18 @@ extern void syscall_unregfunc(void);
#define TP_ARGS(args...) args
#define TP_CONDITION(args...) args
-#ifdef CONFIG_TRACEPOINTS
+/*
+ * Individual subsystems may have a separate configuration to
+ * enable their tracepoints. By default, this file will create
+ * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
+ * wants to be able to disable its tracepoints from being created
+ * it can define NOTRACE before including the tracepoint headers.
+ */
+#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE)
+#define TRACEPOINTS_ENABLED
+#endif
+
+#ifdef TRACEPOINTS_ENABLED
/*
* it_func[0] is never NULL because there is at least one element in the array
@@ -167,10 +186,11 @@ extern void syscall_unregfunc(void);
* structure. Force alignment to the same alignment as the section start.
*
* When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on or we match the
- * condition. This lets us find RCU issues triggered with tracepoints even
- * when this tracepoint is off. This code has no purpose other than poking
- * RCU a bit.
+ * the tracepoint code, regardless of whether tracing is on. However,
+ * don't check if the condition is false, due to interaction with idle
+ * instrumentation. This lets us find RCU issues triggered with tracepoints
+ * even when this tracepoint is off. This code has no purpose other than
+ * poking RCU a bit.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
@@ -196,6 +216,13 @@ extern void syscall_unregfunc(void);
(void *)probe, data); \
} \
static inline int \
+ register_trace_prio_##name(void (*probe)(data_proto), void *data,\
+ int prio) \
+ { \
+ return tracepoint_probe_register_prio(&__tracepoint_##name, \
+ (void *)probe, data, prio); \
+ } \
+ static inline int \
unregister_trace_##name(void (*probe)(data_proto), void *data) \
{ \
return tracepoint_probe_unregister(&__tracepoint_##name,\
@@ -234,7 +261,7 @@ extern void syscall_unregfunc(void);
#define EXPORT_TRACEPOINT_SYMBOL(name) \
EXPORT_SYMBOL(__tracepoint_##name)
-#else /* !CONFIG_TRACEPOINTS */
+#else /* !TRACEPOINTS_ENABLED */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
static inline void trace_##name(proto) \
{ } \
@@ -266,7 +293,7 @@ extern void syscall_unregfunc(void);
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
-#endif /* CONFIG_TRACEPOINTS */
+#endif /* TRACEPOINTS_ENABLED */
#ifdef CONFIG_TRACING
/**
@@ -327,15 +354,19 @@ extern void syscall_unregfunc(void);
* "void *__data, proto" as the callback prototype.
*/
#define DECLARE_TRACE_NOARGS(name) \
- __DECLARE_TRACE(name, void, , 1, void *__data, __data)
+ __DECLARE_TRACE(name, void, , \
+ cpu_online(raw_smp_processor_id()), \
+ void *__data, __data)
#define DECLARE_TRACE(name, proto, args) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()), \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
PARAMS(void *__data, proto), \
PARAMS(__data, args))
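Two of the additions above are easiest to see side by side: a compilation unit can define NOTRACE before including tracepoint headers to suppress tracepoint creation, and probes can now be registered at an explicit priority. A sketch, assuming a hypothetical tracepoint declared elsewhere as DECLARE_TRACE(my_event, TP_PROTO(int val), TP_ARGS(val)):

/* In a file that must not create tracepoints, before any trace headers: */
/* #define NOTRACE */

static void my_probe(void *data, int val)
{
	/* ... handle the event ... */
}

static int __init my_probe_init(void)
{
	/* register_trace_prio_my_event() is generated by __DECLARE_TRACE();
	 * prio determines the ordering among probes on this tracepoint. */
	return register_trace_prio_my_event(my_probe, NULL,
					    TRACEPOINT_DEFAULT_PRIO + 1);
}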
diff --git a/kernel/include/linux/tty.h b/kernel/include/linux/tty.h
index d76631f61..6b6e811f4 100644
--- a/kernel/include/linux/tty.h
+++ b/kernel/include/linux/tty.h
@@ -227,7 +227,6 @@ struct tty_port {
int blocked_open; /* Waiting to open */
int count; /* Usage count */
wait_queue_head_t open_wait; /* Open waiters */
- wait_queue_head_t close_wait; /* Close waiters */
wait_queue_head_t delta_msr_wait; /* Modem status change */
unsigned long flags; /* TTY flags ASY_*/
unsigned char console:1, /* port is a console */
@@ -422,8 +421,9 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
const char *routine);
-extern char *tty_name(struct tty_struct *tty, char *buf);
+extern const char *tty_name(const struct tty_struct *tty);
extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
+extern int __tty_check_change(struct tty_struct *tty, int sig);
extern int tty_check_change(struct tty_struct *tty);
extern void __stop_tty(struct tty_struct *tty);
extern void stop_tty(struct tty_struct *tty);
@@ -467,6 +467,8 @@ extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
extern void tty_buffer_set_lock_subclass(struct tty_port *port);
+extern bool tty_buffer_restart_work(struct tty_port *port);
+extern bool tty_buffer_cancel_work(struct tty_port *port);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
extern void tty_termios_encode_baud_rate(struct ktermios *termios,
@@ -605,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
size_t size, unsigned icanon);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
@@ -613,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern int tty_audit_push_current(void);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty,
- unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ size_t size, unsigned icanon)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
@@ -652,54 +654,11 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* tty_mutex.c */
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern int tty_lock_interruptible(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
extern void tty_set_lock_subclass(struct tty_struct *tty);
-/*
- * this shall be called only from where BTM is held (like close)
- *
- * We need this to ensure nobody waits for us to finish while we are waiting.
- * Without this we were encountering system stalls.
- *
- * This should be indeed removed with BTM removal later.
- *
- * Locking: BTM required. Nobody is allowed to hold port->mutex.
- */
-static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
- long timeout)
-{
- tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */
- tty_wait_until_sent(tty, timeout);
- tty_lock(tty);
-}
-
-/*
- * wait_event_interruptible_tty -- wait for a condition with the tty lock held
- *
- * The condition we are waiting for might take a long time to
- * become true, or might depend on another thread taking the
- * BTM. In either case, we need to drop the BTM to guarantee
- * forward progress. This is a leftover from the conversion
- * from the BKL and should eventually get removed as the BTM
- * falls out of use.
- *
- * Do not use in new code.
- */
-#define wait_event_interruptible_tty(tty, wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __ret = __wait_event_interruptible_tty(tty, wq, \
- condition); \
- __ret; \
-})
-
-#define __wait_event_interruptible_tty(tty, wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
- tty_unlock(tty); \
- schedule(); \
- tty_lock(tty))
#ifdef CONFIG_PROC_FS
extern void proc_tty_register_driver(struct tty_driver *);
@@ -709,4 +668,10 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {}
static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
#endif
+#define tty_debug(tty, f, args...) \
+ do { \
+ printk(KERN_DEBUG "%s: %s: " f, __func__, \
+ tty_name(tty), ##args); \
+ } while (0)
+
#endif
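The new tty_debug() macro pairs naturally with the const-ified tty_name(); a sketch of how a driver might use it (my_ldisc_open is illustrative):

static int my_ldisc_open(struct tty_struct *tty)
{
	/* Expands to printk(KERN_DEBUG "my_ldisc_open: <tty name>: opening\n") */
	tty_debug(tty, "opening\n");
	return 0;
}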
diff --git a/kernel/include/linux/tty_driver.h b/kernel/include/linux/tty_driver.h
index 92e337c18..161052477 100644
--- a/kernel/include/linux/tty_driver.h
+++ b/kernel/include/linux/tty_driver.h
@@ -296,7 +296,7 @@ struct tty_operations {
struct tty_driver {
int magic; /* magic number for this structure */
struct kref kref; /* Reference management */
- struct cdev *cdevs;
+ struct cdev **cdevs;
struct module *owner;
const char *driver_name;
const char *name;
diff --git a/kernel/include/linux/types.h b/kernel/include/linux/types.h
index 8715287c3..70dd3dfde 100644
--- a/kernel/include/linux/types.h
+++ b/kernel/include/linux/types.h
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
@@ -205,13 +205,30 @@ struct ustat {
* struct callback_head - callback structure for use with RCU and task_work
* @next: next update requests in a list
* @func: actual update function to call after the grace period.
+ *
+ * The struct is aligned to the size of a pointer. On most architectures this
+ * happens naturally due to ABI requirements, but some architectures (like
+ * CRIS) have a weird ABI and we need to ask for it explicitly.
+ *
+ * The alignment is required to guarantee that bits 0 and 1 of @next will be
+ * clear under normal conditions -- as long as we use call_rcu(),
+ * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ *
+ * This guarantee is important for a few reasons:
+ * - future call_rcu_lazy() will make use of lower bits in the pointer;
+ * - the structure shares storage space in struct page with @compound_head,
+ * which encodes PageTail() in bit 0. The guarantee is needed to avoid
+ * false-positive PageTail().
*/
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
-};
+} __attribute__((aligned(sizeof(void *))));
#define rcu_head callback_head
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
/* clocksource cycle base type */
typedef u64 cycle_t;
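The new rcu_callback_t typedef and the alignment attribute work together; a minimal sketch, assuming the usual rcupdate.h/slab.h includes and a hypothetical my_obj wrapper freed via call_rcu():

struct my_obj {
	struct rcu_head rcu;
	int payload;
};

static void my_obj_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

static void my_obj_release(struct my_obj *obj)
{
	rcu_callback_t cb = my_obj_free;	/* new typedef */

	/* The aligned attribute keeps bits 0 and 1 of rcu.next clear
	 * while the callback is queued, as the comment above requires. */
	call_rcu(&obj->rcu, cb);
}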
diff --git a/kernel/include/linux/u64_stats_sync.h b/kernel/include/linux/u64_stats_sync.h
index 4b4439e75..df89c9bcb 100644
--- a/kernel/include/linux/u64_stats_sync.h
+++ b/kernel/include/linux/u64_stats_sync.h
@@ -68,11 +68,12 @@ struct u64_stats_sync {
};
+static inline void u64_stats_init(struct u64_stats_sync *syncp)
+{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-# define u64_stats_init(syncp) seqcount_init(syncp.seq)
-#else
-# define u64_stats_init(syncp) do { } while (0)
+ seqcount_init(&syncp->seq);
#endif
+}
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
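Converting u64_stats_init() from a macro to an inline means the argument is now type-checked even on 64-bit builds, where the old macro expanded to nothing and could hide mistakes. Writer-side usage is unchanged; a sketch with a hypothetical my_stats counter:

struct my_stats {
	u64 packets;
	struct u64_stats_sync syncp;
};

static void my_stats_setup(struct my_stats *s)
{
	u64_stats_init(&s->syncp);
}

static void my_stats_count_packet(struct my_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	u64_stats_update_end(&s->syncp);
}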
diff --git a/kernel/include/linux/uaccess.h b/kernel/include/linux/uaccess.h
index 941b2dab5..cf5c472bb 100644
--- a/kernel/include/linux/uaccess.h
+++ b/kernel/include/linux/uaccess.h
@@ -77,36 +77,6 @@ static inline unsigned long __copy_from_user_nocache(void *to,
#endif /* ARCH_HAS_NOCACHE_UACCESS */
-/**
- * probe_kernel_address(): safely attempt to read from a location
- * @addr: address to read from - its type is type typeof(retval)*
- * @retval: read into this variable
- *
- * Safely read from address @addr into variable @revtal. If a kernel fault
- * happens, handle that and return -EFAULT.
- * We ensure that the __get_user() is executed in atomic context so that
- * do_page_fault() doesn't attempt to take mmap_sem. This makes
- * probe_kernel_address() suitable for use within regions where the caller
- * already holds mmap_sem, or other locks which nest inside mmap_sem.
- * This must be a macro because __get_user() needs to know the types of the
- * args.
- *
- * We don't include enough header files to be able to do the set_fs(). We
- * require that the probe_kernel_address() caller will do that.
- */
-#define probe_kernel_address(addr, retval) \
- ({ \
- long ret; \
- mm_segment_t old_fs = get_fs(); \
- \
- set_fs(KERNEL_DS); \
- pagefault_disable(); \
- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
- pagefault_enable(); \
- set_fs(old_fs); \
- ret; \
- })
-
/*
* probe_kernel_read(): safely attempt to read from a location
* @dst: pointer to the buffer that shall take the data
@@ -131,4 +101,16 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+
+/**
+ * probe_kernel_address(): safely attempt to read from a location
+ * @addr: address to read from
+ * @retval: read into this variable
+ *
+ * Returns 0 on success, or -EFAULT.
+ */
+#define probe_kernel_address(addr, retval) \
+ probe_kernel_read(&retval, addr, sizeof(retval))
+
#endif /* __LINUX_UACCESS_H__ */
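The rewritten probe_kernel_address() is now a thin wrapper over probe_kernel_read(), so callers no longer need the set_fs() dance. A sketch of typical use (peek_kernel_int is illustrative):

static int peek_kernel_int(const int *addr, int *out)
{
	int val;

	if (probe_kernel_address(addr, val))
		return -EFAULT;	/* faulted: address was not readable */
	*out = val;
	return 0;
}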
diff --git a/kernel/include/linux/ucs2_string.h b/kernel/include/linux/ucs2_string.h
index cbb20afdb..bb679b48f 100644
--- a/kernel/include/linux/ucs2_string.h
+++ b/kernel/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+ unsigned long maxlength);
+
#endif /* _LINUX_UCS2_STRING_H_ */
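The two new helpers are meant to be used together, sizing first and converting second. A sketch, assuming ucs2_utf8size() returns the byte count excluding a terminating NUL (the helper name is illustrative):

static char *ucs2_to_utf8_dup(const ucs2_char_t *src)
{
	unsigned long len = ucs2_utf8size(src);
	u8 *buf = kzalloc(len + 1, GFP_KERNEL);	/* +1 for the NUL */

	if (!buf)
		return NULL;
	ucs2_as_utf8(buf, src, len);
	return (char *)buf;
}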
diff --git a/kernel/include/linux/ulpi/driver.h b/kernel/include/linux/ulpi/driver.h
new file mode 100644
index 000000000..388f6e08b
--- /dev/null
+++ b/kernel/include/linux/ulpi/driver.h
@@ -0,0 +1,60 @@
+#ifndef __LINUX_ULPI_DRIVER_H
+#define __LINUX_ULPI_DRIVER_H
+
+#include <linux/mod_devicetable.h>
+
+#include <linux/device.h>
+
+struct ulpi_ops;
+
+/**
+ * struct ulpi - describes ULPI PHY device
+ * @id: vendor and product ids for ULPI device
+ * @ops: I/O access
+ * @dev: device interface
+ */
+struct ulpi {
+ struct ulpi_device_id id;
+ struct ulpi_ops *ops;
+ struct device dev;
+};
+
+#define to_ulpi_dev(d) container_of(d, struct ulpi, dev)
+
+static inline void ulpi_set_drvdata(struct ulpi *ulpi, void *data)
+{
+ dev_set_drvdata(&ulpi->dev, data);
+}
+
+static inline void *ulpi_get_drvdata(struct ulpi *ulpi)
+{
+ return dev_get_drvdata(&ulpi->dev);
+}
+
+/**
+ * struct ulpi_driver - describes a ULPI PHY driver
+ * @id_table: array of device identifiers supported by this driver
+ * @probe: binds this driver to ULPI device
+ * @remove: unbinds this driver from ULPI device
+ * @driver: the name and owner members must be initialized by the drivers
+ */
+struct ulpi_driver {
+ const struct ulpi_device_id *id_table;
+ int (*probe)(struct ulpi *ulpi);
+ void (*remove)(struct ulpi *ulpi);
+ struct device_driver driver;
+};
+
+#define to_ulpi_driver(d) container_of(d, struct ulpi_driver, driver)
+
+int ulpi_register_driver(struct ulpi_driver *drv);
+void ulpi_unregister_driver(struct ulpi_driver *drv);
+
+#define module_ulpi_driver(__ulpi_driver) \
+ module_driver(__ulpi_driver, ulpi_register_driver, \
+ ulpi_unregister_driver)
+
+int ulpi_read(struct ulpi *ulpi, u8 addr);
+int ulpi_write(struct ulpi *ulpi, u8 addr, u8 val);
+
+#endif /* __LINUX_ULPI_DRIVER_H */
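A minimal driver skeleton against this new bus interface might look as follows; all names and the vendor/product IDs are hypothetical:

static int my_ulpi_probe(struct ulpi *ulpi)
{
	int vid = ulpi_read(ulpi, 0x00);	/* ULPI_VENDOR_ID_LOW, see regs.h */

	if (vid < 0)
		return vid;
	ulpi_set_drvdata(ulpi, NULL);
	return 0;
}

static void my_ulpi_remove(struct ulpi *ulpi)
{
}

static const struct ulpi_device_id my_ulpi_ids[] = {
	{ 0x1234, 0x5678 },	/* hypothetical vendor, product */
	{ }
};

static struct ulpi_driver my_ulpi_driver = {
	.id_table = my_ulpi_ids,
	.probe = my_ulpi_probe,
	.remove = my_ulpi_remove,
	.driver = {
		.name = "my-ulpi-phy",
		.owner = THIS_MODULE,
	},
};
module_ulpi_driver(my_ulpi_driver);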
diff --git a/kernel/include/linux/ulpi/interface.h b/kernel/include/linux/ulpi/interface.h
new file mode 100644
index 000000000..4de8ab491
--- /dev/null
+++ b/kernel/include/linux/ulpi/interface.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_ULPI_INTERFACE_H
+#define __LINUX_ULPI_INTERFACE_H
+
+#include <linux/types.h>
+
+struct ulpi;
+
+/**
+ * struct ulpi_ops - ULPI register access
+ * @dev: the interface provider
+ * @read: read operation for ULPI register access
+ * @write: write operation for ULPI register access
+ */
+struct ulpi_ops {
+ struct device *dev;
+ int (*read)(struct ulpi_ops *ops, u8 addr);
+ int (*write)(struct ulpi_ops *ops, u8 addr, u8 val);
+};
+
+struct ulpi *ulpi_register_interface(struct device *, struct ulpi_ops *);
+void ulpi_unregister_interface(struct ulpi *);
+
+#endif /* __LINUX_ULPI_INTERFACE_H */
diff --git a/kernel/include/linux/ulpi/regs.h b/kernel/include/linux/ulpi/regs.h
new file mode 100644
index 000000000..b5b8b8804
--- /dev/null
+++ b/kernel/include/linux/ulpi/regs.h
@@ -0,0 +1,130 @@
+#ifndef __LINUX_ULPI_REGS_H
+#define __LINUX_ULPI_REGS_H
+
+/*
+ * Macros for Set and Clear
+ * See ULPI 1.1 specification to find the registers with Set and Clear offsets
+ */
+#define ULPI_SET(a) (a + 1)
+#define ULPI_CLR(a) (a + 2)
+
+/*
+ * Register Map
+ */
+#define ULPI_VENDOR_ID_LOW 0x00
+#define ULPI_VENDOR_ID_HIGH 0x01
+#define ULPI_PRODUCT_ID_LOW 0x02
+#define ULPI_PRODUCT_ID_HIGH 0x03
+#define ULPI_FUNC_CTRL 0x04
+#define ULPI_IFC_CTRL 0x07
+#define ULPI_OTG_CTRL 0x0a
+#define ULPI_USB_INT_EN_RISE 0x0d
+#define ULPI_USB_INT_EN_FALL 0x10
+#define ULPI_USB_INT_STS 0x13
+#define ULPI_USB_INT_LATCH 0x14
+#define ULPI_DEBUG 0x15
+#define ULPI_SCRATCH 0x16
+/* Optional Carkit Registers */
+#define ULPI_CARKIT_CTRL 0x19
+#define ULPI_CARKIT_INT_DELAY 0x1c
+#define ULPI_CARKIT_INT_EN 0x1d
+#define ULPI_CARKIT_INT_STS 0x20
+#define ULPI_CARKIT_INT_LATCH 0x21
+#define ULPI_CARKIT_PLS_CTRL 0x22
+/* Other Optional Registers */
+#define ULPI_TX_POS_WIDTH 0x25
+#define ULPI_TX_NEG_WIDTH 0x26
+#define ULPI_POLARITY_RECOVERY 0x27
+/* Access Extended Register Set */
+#define ULPI_ACCESS_EXTENDED 0x2f
+/* Vendor Specific */
+#define ULPI_VENDOR_SPECIFIC 0x30
+/* Extended Registers */
+#define ULPI_EXT_VENDOR_SPECIFIC 0x80
+
+/*
+ * Register Bits
+ */
+
+/* Function Control */
+#define ULPI_FUNC_CTRL_XCVRSEL BIT(0)
+#define ULPI_FUNC_CTRL_XCVRSEL_MASK 0x3
+#define ULPI_FUNC_CTRL_HIGH_SPEED 0x0
+#define ULPI_FUNC_CTRL_FULL_SPEED 0x1
+#define ULPI_FUNC_CTRL_LOW_SPEED 0x2
+#define ULPI_FUNC_CTRL_FS4LS 0x3
+#define ULPI_FUNC_CTRL_TERMSELECT BIT(2)
+#define ULPI_FUNC_CTRL_OPMODE BIT(3)
+#define ULPI_FUNC_CTRL_OPMODE_MASK (0x3 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0x0 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (0x1 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (0x2 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (0x3 << 3)
+#define ULPI_FUNC_CTRL_RESET BIT(5)
+#define ULPI_FUNC_CTRL_SUSPENDM BIT(6)
+
+/* Interface Control */
+#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE BIT(0)
+#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE BIT(1)
+#define ULPI_IFC_CTRL_CARKITMODE BIT(2)
+#define ULPI_IFC_CTRL_CLOCKSUSPENDM BIT(3)
+#define ULPI_IFC_CTRL_AUTORESUME BIT(4)
+#define ULPI_IFC_CTRL_EXTERNAL_VBUS BIT(5)
+#define ULPI_IFC_CTRL_PASSTHRU BIT(6)
+#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE BIT(7)
+
+/* OTG Control */
+#define ULPI_OTG_CTRL_ID_PULLUP BIT(0)
+#define ULPI_OTG_CTRL_DP_PULLDOWN BIT(1)
+#define ULPI_OTG_CTRL_DM_PULLDOWN BIT(2)
+#define ULPI_OTG_CTRL_DISCHRGVBUS BIT(3)
+#define ULPI_OTG_CTRL_CHRGVBUS BIT(4)
+#define ULPI_OTG_CTRL_DRVVBUS BIT(5)
+#define ULPI_OTG_CTRL_DRVVBUS_EXT BIT(6)
+#define ULPI_OTG_CTRL_EXTVBUSIND BIT(7)
+
+/* USB Interrupt Enable Rising,
+ * USB Interrupt Enable Falling,
+ * USB Interrupt Status and
+ * USB Interrupt Latch
+ */
+#define ULPI_INT_HOST_DISCONNECT BIT(0)
+#define ULPI_INT_VBUS_VALID BIT(1)
+#define ULPI_INT_SESS_VALID BIT(2)
+#define ULPI_INT_SESS_END BIT(3)
+#define ULPI_INT_IDGRD BIT(4)
+
+/* Debug */
+#define ULPI_DEBUG_LINESTATE0 BIT(0)
+#define ULPI_DEBUG_LINESTATE1 BIT(1)
+
+/* Carkit Control */
+#define ULPI_CARKIT_CTRL_CARKITPWR BIT(0)
+#define ULPI_CARKIT_CTRL_IDGNDDRV BIT(1)
+#define ULPI_CARKIT_CTRL_TXDEN BIT(2)
+#define ULPI_CARKIT_CTRL_RXDEN BIT(3)
+#define ULPI_CARKIT_CTRL_SPKLEFTEN BIT(4)
+#define ULPI_CARKIT_CTRL_SPKRIGHTEN BIT(5)
+#define ULPI_CARKIT_CTRL_MICEN BIT(6)
+
+/* Carkit Interrupt Enable */
+#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE BIT(0)
+#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL BIT(1)
+#define ULPI_CARKIT_INT_EN_CARINTDET BIT(2)
+#define ULPI_CARKIT_INT_EN_DP_RISE BIT(3)
+#define ULPI_CARKIT_INT_EN_DP_FALL BIT(4)
+
+/* Carkit Interrupt Status and
+ * Carkit Interrupt Latch
+ */
+#define ULPI_CARKIT_INT_IDFLOAT BIT(0)
+#define ULPI_CARKIT_INT_CARINTDET BIT(1)
+#define ULPI_CARKIT_INT_DP BIT(2)
+
+/* Carkit Pulse Control */
+#define ULPI_CARKIT_PLS_CTRL_TXPLSEN BIT(0)
+#define ULPI_CARKIT_PLS_CTRL_RXPLSEN BIT(1)
+#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN BIT(2)
+#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN BIT(3)
+
+#endif /* __LINUX_ULPI_REGS_H */
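The Set/Clear offsets allow single-bit updates without a read-modify-write cycle; a sketch using the driver.h accessors (the function name is illustrative):

static int my_phy_set_nondriving(struct ulpi *ulpi)
{
	int ret;

	/* Clear the whole opmode field via the register's Clear address... */
	ret = ulpi_write(ulpi, ULPI_CLR(ULPI_FUNC_CTRL),
			 ULPI_FUNC_CTRL_OPMODE_MASK);
	if (ret)
		return ret;
	/* ...then set the non-driving opmode via its Set address. */
	return ulpi_write(ulpi, ULPI_SET(ULPI_FUNC_CTRL),
			  ULPI_FUNC_CTRL_OPMODE_NONDRIVING);
}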
diff --git a/kernel/include/linux/uprobes.h b/kernel/include/linux/uprobes.h
index f5a644c64..0a294e950 100644
--- a/kernel/include/linux/uprobes.h
+++ b/kernel/include/linux/uprobes.h
@@ -21,7 +21,7 @@
* Authors:
* Srikar Dronamraju
* Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/errno.h>
@@ -93,6 +93,22 @@ struct uprobe_task {
unsigned int depth;
};
+struct return_instance {
+ struct uprobe *uprobe;
+ unsigned long func;
+ unsigned long stack; /* stack pointer */
+ unsigned long orig_ret_vaddr; /* original return address */
+ bool chained; /* true, if instance is nested */
+
+ struct return_instance *next; /* keep as stack */
+};
+
+enum rp_check {
+ RP_CHECK_CALL,
+ RP_CHECK_CHAIN_CALL,
+ RP_CHECK_RET,
+};
+
struct xol_area;
struct uprobes_state {
@@ -129,6 +145,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
diff --git a/kernel/include/linux/usb.h b/kernel/include/linux/usb.h
index 447fe29b5..b79925dd2 100644
--- a/kernel/include/linux/usb.h
+++ b/kernel/include/linux/usb.h
@@ -122,6 +122,8 @@ enum usb_interface_condition {
* has been deferred.
* @needs_binding: flag set when the driver should be re-probed or unbound
* following a reset or suspend operation it doesn't support.
+ * @authorized: This allows one to (de)authorize individual interfaces
+ * instead of a whole device, in contrast to device authorization.
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
@@ -178,6 +180,7 @@ struct usb_interface {
unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
unsigned needs_binding:1; /* needs delayed unbind/rebind */
unsigned resetting_device:1; /* true: bandwidth alloc after reset */
+ unsigned authorized:1; /* used for interface authorization */
struct device dev; /* interface specific device info */
struct device *usb_dev;
@@ -325,6 +328,7 @@ struct usb_host_bos {
/* wireless cap descriptor is handled by wusb */
struct usb_ext_cap_descriptor *ext_cap;
struct usb_ss_cap_descriptor *ss_cap;
+ struct usb_ssp_cap_descriptor *ssp_cap;
struct usb_ss_container_id_descriptor *ss_id;
};
@@ -507,6 +511,8 @@ struct usb3_lpm_parameters {
* @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
* @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
* @usb3_lpm_enabled: USB3 hardware LPM enabled
+ * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled
+ * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled
* @string_langid: language ID for strings
* @product: iProduct string, if present (static)
* @manufacturer: iManufacturer string, if present (static)
@@ -580,6 +586,8 @@ struct usb_device {
unsigned usb2_hw_lpm_enabled:1;
unsigned usb2_hw_lpm_allowed:1;
unsigned usb3_lpm_enabled:1;
+ unsigned usb3_lpm_u1_enabled:1;
+ unsigned usb3_lpm_u2_enabled:1;
int string_langid;
/* static strings from the device */
diff --git a/kernel/include/linux/usb/cdc.h b/kernel/include/linux/usb/cdc.h
new file mode 100644
index 000000000..b5706f94e
--- /dev/null
+++ b/kernel/include/linux/usb/cdc.h
@@ -0,0 +1,51 @@
+/*
+ * USB CDC common helpers
+ *
+ * Copyright (c) 2015 Oliver Neukum <oneukum@suse.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#ifndef __LINUX_USB_CDC_H
+#define __LINUX_USB_CDC_H
+
+#include <uapi/linux/usb/cdc.h>
+
+/*
+ * unofficial magic numbers
+ */
+
+#define CDC_PHONET_MAGIC_NUMBER 0xAB
+
+/*
+ * parsing CDC headers
+ */
+
+struct usb_cdc_parsed_header {
+ struct usb_cdc_union_desc *usb_cdc_union_desc;
+ struct usb_cdc_header_desc *usb_cdc_header_desc;
+
+ struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor;
+ struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor;
+ struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc;
+ struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc;
+ struct usb_cdc_ether_desc *usb_cdc_ether_desc;
+ struct usb_cdc_dmm_desc *usb_cdc_dmm_desc;
+ struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc;
+ struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc;
+ struct usb_cdc_obex_desc *usb_cdc_obex_desc;
+ struct usb_cdc_ncm_desc *usb_cdc_ncm_desc;
+ struct usb_cdc_mbim_desc *usb_cdc_mbim_desc;
+ struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc;
+
+ bool phonet_magic_present;
+};
+
+struct usb_interface;
+int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ struct usb_interface *intf,
+ u8 *buffer,
+ int buflen);
+
+#endif /* __LINUX_USB_CDC_H */
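Consumers fill a usb_cdc_parsed_header once and then test only the descriptor pointers they care about; a sketch of a probe-time call (names and error policy are illustrative):

static int my_cdc_bind(struct usb_interface *intf, u8 *buffer, int buflen)
{
	struct usb_cdc_parsed_header hdr;

	cdc_parse_cdc_header(&hdr, intf, buffer, buflen);
	if (!hdr.usb_cdc_union_desc)
		return -EINVAL;	/* no union descriptor: cannot find data intf */
	/* ... use hdr.usb_cdc_union_desc->bSlaveInterface0 ... */
	return 0;
}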
diff --git a/kernel/include/linux/usb/cdc_ncm.h b/kernel/include/linux/usb/cdc_ncm.h
index 7c9b48473..3a375d07d 100644
--- a/kernel/include/linux/usb/cdc_ncm.h
+++ b/kernel/include/linux/usb/cdc_ncm.h
@@ -80,6 +80,9 @@
#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
+/* Driver flags */
+#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
+
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
spinlock_t mtx;
atomic_t stop;
+ int drvflags;
u32 timer_interval;
u32 max_ndp_size;
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -133,7 +138,8 @@ struct cdc_ncm_ctx {
};
u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
diff --git a/kernel/include/linux/usb/ch9.h b/kernel/include/linux/usb/ch9.h
index 27603bcbb..6cc96bb12 100644
--- a/kernel/include/linux/usb/ch9.h
+++ b/kernel/include/linux/usb/ch9.h
@@ -32,9 +32,9 @@
#ifndef __LINUX_USB_CH9_H
#define __LINUX_USB_CH9_H
+#include <linux/device.h>
#include <uapi/linux/usb/ch9.h>
-
/**
* usb_speed_string() - Returns human readable-name of the speed.
* @speed: The speed to return human-readable name for. If it's not
@@ -43,6 +43,15 @@
*/
extern const char *usb_speed_string(enum usb_device_speed speed);
+/**
+ * usb_get_maximum_speed - Get maximum requested speed for a given USB
+ * controller.
+ * @dev: Pointer to the given USB controller device
+ *
+ * The function gets the maximum speed string from property "maximum-speed",
+ * and returns the corresponding enum usb_device_speed.
+ */
+extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
/**
* usb_state_string - Returns human readable name for the state.
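A controller driver would typically call the new helper once at probe time; a sketch, assuming USB_SPEED_UNKNOWN is returned when the "maximum-speed" property is absent:

static void my_hdrc_set_speed(struct device *dev)
{
	enum usb_device_speed max = usb_get_maximum_speed(dev);

	if (max == USB_SPEED_UNKNOWN)
		max = USB_SPEED_HIGH;	/* illustrative fallback */
	/* ... cap the controller's operating speed at 'max' ... */
}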
diff --git a/kernel/include/linux/usb/chipidea.h b/kernel/include/linux/usb/chipidea.h
index ab94f78c4..5dd75fa47 100644
--- a/kernel/include/linux/usb/chipidea.h
+++ b/kernel/include/linux/usb/chipidea.h
@@ -5,9 +5,28 @@
#ifndef __LINUX_USB_CHIPIDEA_H
#define __LINUX_USB_CHIPIDEA_H
+#include <linux/extcon.h>
#include <linux/usb/otg.h>
struct ci_hdrc;
+
+/**
+ * struct ci_hdrc_cable - structure for external connector cable state tracking
+ * @state: current state of the line
+ * @changed: set to true when extcon event happen
+ * @edev: device which generate events
+ * @ci: driver state of the chipidea device
+ * @nb: hold event notification callback
+ * @conn: used for notification registration
+ */
+struct ci_hdrc_cable {
+ bool state;
+ bool changed;
+ struct extcon_dev *edev;
+ struct ci_hdrc *ci;
+ struct notifier_block nb;
+};
+
struct ci_hdrc_platform_data {
const char *name;
/* offset of the capability registers */
@@ -19,8 +38,11 @@ struct ci_hdrc_platform_data {
enum usb_phy_interface phy_mode;
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
+#define CI_HDRC_DISABLE_DEVICE_STREAMING BIT(1)
#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
-#define CI_HDRC_DISABLE_STREAMING BIT(3)
+#define CI_HDRC_DISABLE_HOST_STREAMING BIT(3)
+#define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING | \
+ CI_HDRC_DISABLE_HOST_STREAMING)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
* but otg is not supported (no register otgsc).
@@ -29,12 +51,27 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
#define CI_HDRC_FORCE_FULLSPEED BIT(6)
#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
+#define CI_HDRC_SET_NON_ZERO_TTHA BIT(8)
+#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
+#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
+#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
void (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
+ struct usb_otg_caps ci_otg_caps;
bool tpl_support;
+ /* interrupt threshold setting */
+ u32 itc_setting;
+ u32 ahb_burst_config;
+ u32 tx_burst_size;
+ u32 rx_burst_size;
+
+ /* VBUS and ID signal state tracking, using extcon framework */
+ struct ci_hdrc_cable vbus_extcon;
+ struct ci_hdrc_cable id_extcon;
+ u32 phy_clkgate_delay_us;
};
/* Default offset of capability registers */
diff --git a/kernel/include/linux/usb/composite.h b/kernel/include/linux/usb/composite.h
index 2511469a9..1074b8921 100644
--- a/kernel/include/linux/usb/composite.h
+++ b/kernel/include/linux/usb/composite.h
@@ -228,6 +228,8 @@ struct usb_function {
struct list_head list;
DECLARE_BITMAP(endpoints, 32);
const struct usb_function_instance *fi;
+
+ unsigned int bind_deactivated:1;
};
int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/kernel/include/linux/usb/gadget.h b/kernel/include/linux/usb/gadget.h
index 4f3dfb7d0..3d583a10b 100644
--- a/kernel/include/linux/usb/gadget.h
+++ b/kernel/include/linux/usb/gadget.h
@@ -141,10 +141,49 @@ struct usb_ep_ops {
};
/**
+ * struct usb_ep_caps - endpoint capabilities description
+ * @type_control:Endpoint supports control type (reserved for ep0).
+ * @type_iso:Endpoint supports isochronous transfers.
+ * @type_bulk:Endpoint supports bulk transfers.
+ * @type_int:Endpoint supports interrupt transfers.
+ * @dir_in:Endpoint supports IN direction.
+ * @dir_out:Endpoint supports OUT direction.
+ */
+struct usb_ep_caps {
+ unsigned type_control:1;
+ unsigned type_iso:1;
+ unsigned type_bulk:1;
+ unsigned type_int:1;
+ unsigned dir_in:1;
+ unsigned dir_out:1;
+};
+
+#define USB_EP_CAPS_TYPE_CONTROL 0x01
+#define USB_EP_CAPS_TYPE_ISO 0x02
+#define USB_EP_CAPS_TYPE_BULK 0x04
+#define USB_EP_CAPS_TYPE_INT 0x08
+#define USB_EP_CAPS_TYPE_ALL \
+ (USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+#define USB_EP_CAPS_DIR_IN 0x01
+#define USB_EP_CAPS_DIR_OUT 0x02
+#define USB_EP_CAPS_DIR_ALL (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)
+
+#define USB_EP_CAPS(_type, _dir) \
+ { \
+ .type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
+ .type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
+ .type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
+ .type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
+ .dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
+ .dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
+ }
+
+/**
* struct usb_ep - device side representation of USB endpoint
* @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
+ * @caps:The structure describing types and directions supported by the endpoint.
* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
* the endpoint descriptor used to configure the endpoint.
@@ -167,12 +206,16 @@ struct usb_ep_ops {
* gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
* and is accessed only in response to a driver setup() callback.
*/
+
struct usb_ep {
void *driver_data;
const char *name;
const struct usb_ep_ops *ops;
struct list_head ep_list;
+ struct usb_ep_caps caps;
+ bool claimed;
+ bool enabled;
unsigned maxpacket:16;
unsigned maxpacket_limit:16;
unsigned max_streams:16;
@@ -222,7 +265,18 @@ static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
*/
static inline int usb_ep_enable(struct usb_ep *ep)
{
- return ep->ops->enable(ep, ep->desc);
+ int ret;
+
+ if (ep->enabled)
+ return 0;
+
+ ret = ep->ops->enable(ep, ep->desc);
+ if (ret)
+ return ret;
+
+ ep->enabled = true;
+
+ return 0;
}
/**
@@ -239,7 +293,18 @@ static inline int usb_ep_enable(struct usb_ep *ep)
*/
static inline int usb_ep_disable(struct usb_ep *ep)
{
- return ep->ops->disable(ep);
+ int ret;
+
+ if (!ep->enabled)
+ return 0;
+
+ ret = ep->ops->disable(ep);
+ if (ret)
+ return ret;
+
+ ep->enabled = false;
+
+ return 0;
}
/**
@@ -492,6 +557,9 @@ struct usb_gadget_ops {
int (*udc_start)(struct usb_gadget *,
struct usb_gadget_driver *);
int (*udc_stop)(struct usb_gadget *);
+ struct usb_ep *(*match_ep)(struct usb_gadget *,
+ struct usb_endpoint_descriptor *,
+ struct usb_ss_ep_comp_descriptor *);
};
/**
@@ -511,6 +579,7 @@ struct usb_gadget_ops {
* @dev: Driver model state for this abstract device.
* @out_epnum: last used out ep number
* @in_epnum: last used in ep number
+ * @otg_caps: OTG capabilities of this gadget.
* @sg_supported: true if we can handle scatter-gather
* @is_otg: True if the USB device port uses a Mini-AB jack, so that the
* gadget driver must provide a USB OTG descriptor.
@@ -526,6 +595,9 @@ struct usb_gadget_ops {
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
* @is_selfpowered: if the gadget is self-powered.
+ * @deactivated: True if gadget is deactivated - in deactivated state it cannot
+ * be connected.
+ * @connected: True if gadget is connected.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -559,6 +631,7 @@ struct usb_gadget {
struct device dev;
unsigned out_epnum;
unsigned in_epnum;
+ struct usb_otg_caps *otg_caps;
unsigned sg_supported:1;
unsigned is_otg:1;
@@ -567,7 +640,12 @@ struct usb_gadget {
unsigned a_hnp_support:1;
unsigned a_alt_hnp_support:1;
unsigned quirk_ep_out_aligned_size:1;
+ unsigned quirk_altset_not_supp:1;
+ unsigned quirk_stall_not_supp:1;
+ unsigned quirk_zlp_not_supp:1;
unsigned is_selfpowered:1;
+ unsigned deactivated:1;
+ unsigned connected:1;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -584,7 +662,6 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
#define gadget_for_each_ep(tmp, gadget) \
list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
-
/**
* usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
 * requires quirk_ep_out_aligned_size, otherwise returns len.
@@ -603,6 +680,34 @@ usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
}
/**
+ * gadget_is_altset_supported - return true iff the hardware supports
+ * altsettings
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_altset_supported(struct usb_gadget *g)
+{
+ return !g->quirk_altset_not_supp;
+}
+
+/**
+ * gadget_is_stall_supported - return true iff the hardware supports stalling
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_stall_supported(struct usb_gadget *g)
+{
+ return !g->quirk_stall_not_supp;
+}
+
+/**
+ * gadget_is_zlp_supported - return true iff the hardware supports zlp
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_zlp_supported(struct usb_gadget *g)
+{
+ return !g->quirk_zlp_not_supp;
+}
+
+/**
* gadget_is_dualspeed - return true iff the hardware handles high speed
* @g: controller that might support both high and full speeds
*/
@@ -771,9 +876,24 @@ static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
*/
static inline int usb_gadget_connect(struct usb_gadget *gadget)
{
+ int ret;
+
if (!gadget->ops->pullup)
return -EOPNOTSUPP;
- return gadget->ops->pullup(gadget, 1);
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will be connected automatically after activation.
+ */
+ gadget->connected = true;
+ return 0;
+ }
+
+ ret = gadget->ops->pullup(gadget, 1);
+ if (!ret)
+ gadget->connected = 1;
+ return ret;
}
/**
@@ -784,20 +904,88 @@ static inline int usb_gadget_connect(struct usb_gadget *gadget)
* as a disconnect (when a VBUS session is active). Not all systems
* support software pullup controls.
*
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ if (!gadget->ops->pullup)
+ return -EOPNOTSUPP;
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will stay disconnected after activation.
+ */
+ gadget->connected = false;
+ return 0;
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+ if (!ret)
+ gadget->connected = 0;
+ return ret;
+}
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
* This routine may be used during the gadget driver bind() call to prevent
* the peripheral from ever being visible to the USB host, unless later
- * usb_gadget_connect() is called. For example, user mode components may
+ * usb_gadget_activate() is called. For example, user mode components may
* need to be activated before the system can talk to hosts.
*
* Returns zero on success, else negative errno.
*/
-static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
{
- if (!gadget->ops->pullup)
- return -EOPNOTSUPP;
- return gadget->ops->pullup(gadget, 0);
+ int ret;
+
+ if (gadget->deactivated)
+ return 0;
+
+ if (gadget->connected) {
+ ret = usb_gadget_disconnect(gadget);
+ if (ret)
+ return ret;
+ /*
+ * If gadget was being connected before deactivation, we want
+ * to reconnect it in usb_gadget_activate().
+ */
+ gadget->connected = true;
+ }
+ gadget->deactivated = true;
+
+ return 0;
}
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_activate(struct usb_gadget *gadget)
+{
+ if (!gadget->deactivated)
+ return 0;
+
+ gadget->deactivated = false;
+
+ /*
+ * If gadget has been connected before deactivation, or became connected
+ * while it was being deactivated, we call usb_gadget_connect().
+ */
+ if (gadget->connected)
+ return usb_gadget_connect(gadget);
+
+ return 0;
+}
/*-------------------------------------------------------------------------*/
@@ -1002,6 +1190,10 @@ int usb_assign_descriptors(struct usb_function *f,
struct usb_descriptor_header **ss);
void usb_free_all_descriptors(struct usb_function *f);
+struct usb_descriptor_header *usb_otg_descriptor_alloc(
+ struct usb_gadget *gadget);
+int usb_otg_descriptor_init(struct usb_gadget *gadget,
+ struct usb_descriptor_header *otg_desc);
/*-------------------------------------------------------------------------*/
/* utility to simplify map/unmap of usb_requests to/from DMA */
@@ -1034,6 +1226,21 @@ extern void usb_gadget_giveback_request(struct usb_ep *ep,
/*-------------------------------------------------------------------------*/
+/* utility to find endpoint by name */
+
+extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g,
+ const char *name);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to check if endpoint caps match descriptor needs */
+
+extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp);
+
+/*-------------------------------------------------------------------------*/
+
/* utility to update vbus status for udc core, it may be scheduled */
extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
@@ -1049,6 +1256,8 @@ extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *,
struct usb_endpoint_descriptor *,
struct usb_ss_ep_comp_descriptor *);
+extern void usb_ep_autoconfig_release(struct usb_ep *);
+
extern void usb_ep_autoconfig_reset(struct usb_gadget *);
#endif /* __LINUX_USB_GADGET_H */
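Tying the endpoint-capability additions together: a UDC driver fills in usb_ep.caps so that ep autoconfig (and the new match_ep hook) can check descriptors against hardware limits. A sketch; the function name and the 512-byte limit are illustrative:

/* Capability masks combine with |, e.g. a bulk+interrupt IN endpoint: */
static const struct usb_ep_caps my_ep_caps =
	USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT,
		    USB_EP_CAPS_DIR_IN);

static void my_udc_init_ep(struct usb_ep *ep)
{
	ep->caps = my_ep_caps;
	usb_ep_set_maxpacket_limit(ep, 512);
}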
diff --git a/kernel/include/linux/usb/gadget_configfs.h b/kernel/include/linux/usb/gadget_configfs.h
index d74c0ae98..c36e95730 100644
--- a/kernel/include/linux/usb/gadget_configfs.h
+++ b/kernel/include/linux/usb/gadget_configfs.h
@@ -7,9 +7,10 @@ int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev);
#define GS_STRINGS_W(__struct, __name) \
- static ssize_t __struct##_##__name##_store(struct __struct *gs, \
+static ssize_t __struct##_##__name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
+ struct __struct *gs = to_##__struct(item); \
int ret; \
\
ret = usb_string_copy(page, &gs->__name); \
@@ -19,30 +20,20 @@ int check_user_usb_string(const char *name,
}
#define GS_STRINGS_R(__struct, __name) \
- static ssize_t __struct##_##__name##_show(struct __struct *gs, \
- char *page) \
+static ssize_t __struct##_##__name##_show(struct config_item *item, char *page) \
{ \
+ struct __struct *gs = to_##__struct(item); \
return sprintf(page, "%s\n", gs->__name ?: ""); \
}
-#define GS_STRING_ITEM_ATTR(struct_name, name) \
- static struct struct_name##_attribute struct_name##_##name = \
- __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \
- struct_name##_##name##_show, \
- struct_name##_##name##_store)
-
#define GS_STRINGS_RW(struct_name, _name) \
GS_STRINGS_R(struct_name, _name) \
GS_STRINGS_W(struct_name, _name) \
- GS_STRING_ITEM_ATTR(struct_name, _name)
+ CONFIGFS_ATTR(struct_name##_, _name)
#define USB_CONFIG_STRING_RW_OPS(struct_in) \
- CONFIGFS_ATTR_OPS(struct_in); \
- \
static struct configfs_item_operations struct_in##_langid_item_ops = { \
.release = struct_in##_attr_release, \
- .show_attribute = struct_in##_attr_show, \
- .store_attribute = struct_in##_attr_store, \
}; \
\
static struct config_item_type struct_in##_langid_type = { \
diff --git a/kernel/include/linux/usb/hcd.h b/kernel/include/linux/usb/hcd.h
index 68b1e836d..f89c24bd5 100644
--- a/kernel/include/linux/usb/hcd.h
+++ b/kernel/include/linux/usb/hcd.h
@@ -58,12 +58,6 @@
*
* Since "struct usb_bus" is so thin, you can't share much code in it.
* This framework is a layer over that, and should be more sharable.
- *
- * @authorized_default: Specifies if new devices are authorized to
- * connect by default or they require explicit
- * user space authorization; this bit is settable
- * through /sys/class/usb_host/X/authorized_default.
- * For the rest is RO, so we don't lock to r/w it.
*/
/*-------------------------------------------------------------------------*/
@@ -120,6 +114,8 @@ struct usb_hcd {
#define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
+#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
+#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -131,6 +127,22 @@ struct usb_hcd {
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+ /*
+ * Specifies whether interfaces are authorized by default
+ * or require explicit user space authorization; this bit is
+ * settable through /sys/class/usb_host/X/interface_authorized_default
+ */
+#define HCD_INTF_AUTHORIZED(hcd) \
+ ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED))
+
+ /*
+ * Specifies whether devices are authorized by default
+ * or require explicit user space authorization; this bit is
+ * settable through /sys/class/usb_host/X/authorized_default
+ */
+#define HCD_DEV_AUTHORIZED(hcd) \
+ ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED))
+
/* Flags that get set only during HCD registration or removal. */
unsigned rh_registered:1;/* is root hub registered? */
unsigned rh_pollable:1; /* may we poll the root hub? */
@@ -141,7 +153,6 @@ struct usb_hcd {
* support the new root-hub polling mechanism. */
unsigned uses_new_polling:1;
unsigned wireless:1; /* Wireless USB HCD */
- unsigned authorized_default:1;
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
@@ -239,6 +250,7 @@ struct hc_driver {
#define HCD_USB2 0x0020 /* USB 2.0 */
#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
+#define HCD_USB31 0x0050 /* USB 3.1 */
#define HCD_MASK 0x0070
#define HCD_BH 0x0100 /* URB complete in BH context */
@@ -564,9 +576,9 @@ extern void usb_ep0_reinit(struct usb_device *);
/*-------------------------------------------------------------------------*/
-/* class requests from USB 3.0 hub spec, table 10-5 */
-#define SetHubDepth (0x3000 | HUB_SET_DEPTH)
-#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT)
+/* class requests from USB 3.1 hub spec, table 10-7 */
+#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
+#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
/*
* Generic bandwidth allocation constants/support
@@ -622,8 +634,6 @@ extern struct list_head usb_bus_list;
extern struct mutex usb_bus_list_lock;
extern wait_queue_head_t usb_kill_urb_queue;
-extern int usb_find_interface_driver(struct usb_device *dev,
- struct usb_interface *interface);
#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
diff --git a/kernel/include/linux/usb/msm_hsusb.h b/kernel/include/linux/usb/msm_hsusb.h
index 7dbecf9a4..8c8f6854c 100644
--- a/kernel/include/linux/usb/msm_hsusb.h
+++ b/kernel/include/linux/usb/msm_hsusb.h
@@ -18,6 +18,7 @@
#ifndef __ASM_ARCH_MSM_HSUSB_H
#define __ASM_ARCH_MSM_HSUSB_H
+#include <linux/extcon.h>
#include <linux/types.h>
#include <linux/usb/otg.h>
#include <linux/clk.h>
@@ -120,6 +121,17 @@ struct msm_otg_platform_data {
};
/**
+ * struct msm_usb_cable - structure for external connector cable
+ * state tracking
+ * @nb: holds the event notification callback
+ * @extcon: extcon device used for notification registration
+ */
+struct msm_usb_cable {
+ struct notifier_block nb;
+ struct extcon_dev *extcon;
+};
+
+/**
* struct msm_otg: OTG driver data. Shared by HCD and DCD.
* @otg: USB OTG Transceiver structure.
* @pdata: otg device platform data.
@@ -138,6 +150,15 @@ struct msm_otg_platform_data {
* @chg_type: The type of charger attached.
* @dcd_retires: The retry count used to track Data contact
* detection process.
+ * @manual_pullup: true if VBUS is not routed to the USB controller/phy
+ * and the controller driver therefore enables the pull-up explicitly
+ * before starting the controller using the usbcmd run/stop bit.
+ * @vbus: VBUS signal state tracking, using the extcon framework
+ * @id: ID signal state tracking, using the extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ * SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ * connector
*/
struct msm_otg {
struct usb_phy phy;
@@ -166,6 +187,14 @@ struct msm_otg {
struct reset_control *phy_rst;
struct reset_control *link_rst;
int vdd_levels[3];
+
+ bool manual_pullup;
+
+ struct msm_usb_cable vbus;
+ struct msm_usb_cable id;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot;
};
#endif
diff --git a/kernel/include/linux/usb/msm_hsusb_hw.h b/kernel/include/linux/usb/msm_hsusb_hw.h
index a29f6030a..e159b39f6 100644
--- a/kernel/include/linux/usb/msm_hsusb_hw.h
+++ b/kernel/include/linux/usb/msm_hsusb_hw.h
@@ -21,6 +21,8 @@
#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
+#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
+
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
#define USB_USBCMD (MSM_USB_BASE + 0x0140)
@@ -30,6 +32,9 @@
#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240)
#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
+#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
+#define USBCMD_SESS_VLD_CTRL BIT(25)
+
#define USBCMD_RESET 2
#define USB_USBINTR (MSM_USB_BASE + 0x0148)
@@ -50,6 +55,10 @@
#define ULPI_PWR_CLK_MNG_REG 0x88
#define OTG_COMP_DISABLE BIT(0)
+#define ULPI_MISC_A 0x96
+#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
+#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
+
#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
diff --git a/kernel/include/linux/usb/musb.h b/kernel/include/linux/usb/musb.h
index a4ee1b582..fa6dc132b 100644
--- a/kernel/include/linux/usb/musb.h
+++ b/kernel/include/linux/usb/musb.h
@@ -95,7 +95,7 @@ struct musb_hdrc_config {
/* musb CLKIN in Blackfin in MHZ */
unsigned char clkin;
#endif
-
+ u32 maximum_speed;
};
struct musb_hdrc_platform_data {
diff --git a/kernel/include/linux/usb/net2280.h b/kernel/include/linux/usb/net2280.h
index 148b8fa5b..725120224 100644
--- a/kernel/include/linux/usb/net2280.h
+++ b/kernel/include/linux/usb/net2280.h
@@ -168,6 +168,9 @@ struct net2280_regs {
#define ENDPOINT_B_INTERRUPT 2
#define ENDPOINT_A_INTERRUPT 1
#define ENDPOINT_0_INTERRUPT 0
+#define USB3380_IRQSTAT0_EP_INTR_MASK_IN (0xF << 17)
+#define USB3380_IRQSTAT0_EP_INTR_MASK_OUT (0xF << 1)
+
u32 irqstat1;
#define POWER_STATE_CHANGE_INTERRUPT 27
#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
diff --git a/kernel/include/linux/usb/of.h b/kernel/include/linux/usb/of.h
index cfe0528cd..c3fe9e48c 100644
--- a/kernel/include/linux/usb/of.h
+++ b/kernel/include/linux/usb/of.h
@@ -12,24 +12,19 @@
#include <linux/usb/phy.h>
#if IS_ENABLED(CONFIG_OF)
-enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
-enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
bool of_usb_host_tpl_support(struct device_node *np);
+int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps);
#else
-static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
-{
- return USB_DR_MODE_UNKNOWN;
-}
-
-static inline enum usb_device_speed
-of_usb_get_maximum_speed(struct device_node *np)
-{
- return USB_SPEED_UNKNOWN;
-}
static inline bool of_usb_host_tpl_support(struct device_node *np)
{
return false;
}
+static inline int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps)
+{
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/kernel/include/linux/usb/otg.h b/kernel/include/linux/usb/otg.h
index 52661c5da..67929df86 100644
--- a/kernel/include/linux/usb/otg.h
+++ b/kernel/include/linux/usb/otg.h
@@ -41,6 +41,21 @@ struct usb_otg {
};
+/**
+ * struct usb_otg_caps - describes the otg capabilities of the device
+ * @otg_rev: The OTG revision number the device is compliant with; it is
+ * in binary-coded decimal (i.e. 2.0 is 0200H).
+ * @hnp_support: Indicates if the device supports HNP.
+ * @srp_support: Indicates if the device supports SRP.
+ * @adp_support: Indicates if the device supports ADP.
+ */
+struct usb_otg_caps {
+ u16 otg_rev;
+ bool hnp_support;
+ bool srp_support;
+ bool adp_support;
+};
+
extern const char *usb_otg_state_string(enum usb_otg_state state);
/* Context: can sleep */
@@ -104,4 +119,13 @@ enum usb_dr_mode {
USB_DR_MODE_OTG,
};
+/**
+ * usb_get_dr_mode - Get dual role mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function reads the phy interface string from the 'dr_mode' property
+ * and returns the corresponding enum usb_dr_mode.
+ */
+extern enum usb_dr_mode usb_get_dr_mode(struct device *dev);
+
#endif /* __LINUX_USB_OTG_H */
diff --git a/kernel/include/linux/usb/phy.h b/kernel/include/linux/usb/phy.h
index bc91b5d38..31a8068c4 100644
--- a/kernel/include/linux/usb/phy.h
+++ b/kernel/include/linux/usb/phy.h
@@ -63,7 +63,7 @@ enum usb_otg_state {
struct usb_phy;
struct usb_otg;
-/* for transceivers connected thru an ULPI interface, the user must
+/* for phys connected thru an ULPI interface, the user must
* provide access ops
*/
struct usb_phy_io_ops {
@@ -92,10 +92,10 @@ struct usb_phy {
u16 port_status;
u16 port_change;
- /* to support controllers that have multiple transceivers */
+ /* to support controllers that have multiple phys */
struct list_head head;
- /* initialize/shutdown the OTG controller */
+ /* initialize/shutdown the phy */
int (*init)(struct usb_phy *x);
void (*shutdown)(struct usb_phy *x);
@@ -106,7 +106,7 @@ struct usb_phy {
int (*set_power)(struct usb_phy *x,
unsigned mA);
- /* Set transceiver into suspend mode */
+ /* Set phy into suspend mode */
int (*set_suspend)(struct usb_phy *x,
int suspend);
@@ -205,6 +205,8 @@ extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index);
extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index);
extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
const char *phandle, u8 index);
+extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
+ struct device_node *node, struct notifier_block *nb);
extern void usb_put_phy(struct usb_phy *);
extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern int usb_bind_phy(const char *dev_name, u8 index,
@@ -238,6 +240,12 @@ static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
return ERR_PTR(-ENXIO);
}
+static inline struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
+ struct device_node *node, struct notifier_block *nb)
+{
+ return ERR_PTR(-ENXIO);
+}
+
static inline void usb_put_phy(struct usb_phy *x)
{
}
diff --git a/kernel/include/linux/usb/quirks.h b/kernel/include/linux/usb/quirks.h
index 9948c874e..1d0043dc3 100644
--- a/kernel/include/linux/usb/quirks.h
+++ b/kernel/include/linux/usb/quirks.h
@@ -47,4 +47,7 @@
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
+/* device can't handle Link Power Management */
+#define USB_QUIRK_NO_LPM BIT(10)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/kernel/include/linux/usb/renesas_usbhs.h b/kernel/include/linux/usb/renesas_usbhs.h
index f06529c14..bfb74723f 100644
--- a/kernel/include/linux/usb/renesas_usbhs.h
+++ b/kernel/include/linux/usb/renesas_usbhs.h
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param {
*/
int pio_dma_border; /* default is 64byte */
- u32 type;
+ uintptr_t type;
u32 enable_gpio;
/*
@@ -169,8 +169,7 @@ struct renesas_usbhs_driver_param {
#define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */
};
-#define USBHS_TYPE_R8A7790 1
-#define USBHS_TYPE_R8A7791 2
+#define USBHS_TYPE_RCAR_GEN2 1
/*
* option:
diff --git a/kernel/include/linux/usb/ulpi.h b/kernel/include/linux/usb/ulpi.h
index 5c295c26a..5f07407a3 100644
--- a/kernel/include/linux/usb/ulpi.h
+++ b/kernel/include/linux/usb/ulpi.h
@@ -12,6 +12,8 @@
#define __LINUX_USB_ULPI_H
#include <linux/usb/otg.h>
+#include <linux/ulpi/regs.h>
+
/*-------------------------------------------------------------------------*/
/*
@@ -49,138 +51,6 @@
/*-------------------------------------------------------------------------*/
-/*
- * Macros for Set and Clear
- * See ULPI 1.1 specification to find the registers with Set and Clear offsets
- */
-#define ULPI_SET(a) (a + 1)
-#define ULPI_CLR(a) (a + 2)
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Register Map
- */
-#define ULPI_VENDOR_ID_LOW 0x00
-#define ULPI_VENDOR_ID_HIGH 0x01
-#define ULPI_PRODUCT_ID_LOW 0x02
-#define ULPI_PRODUCT_ID_HIGH 0x03
-#define ULPI_FUNC_CTRL 0x04
-#define ULPI_IFC_CTRL 0x07
-#define ULPI_OTG_CTRL 0x0a
-#define ULPI_USB_INT_EN_RISE 0x0d
-#define ULPI_USB_INT_EN_FALL 0x10
-#define ULPI_USB_INT_STS 0x13
-#define ULPI_USB_INT_LATCH 0x14
-#define ULPI_DEBUG 0x15
-#define ULPI_SCRATCH 0x16
-/* Optional Carkit Registers */
-#define ULPI_CARCIT_CTRL 0x19
-#define ULPI_CARCIT_INT_DELAY 0x1c
-#define ULPI_CARCIT_INT_EN 0x1d
-#define ULPI_CARCIT_INT_STS 0x20
-#define ULPI_CARCIT_INT_LATCH 0x21
-#define ULPI_CARCIT_PLS_CTRL 0x22
-/* Other Optional Registers */
-#define ULPI_TX_POS_WIDTH 0x25
-#define ULPI_TX_NEG_WIDTH 0x26
-#define ULPI_POLARITY_RECOVERY 0x27
-/* Access Extended Register Set */
-#define ULPI_ACCESS_EXTENDED 0x2f
-/* Vendor Specific */
-#define ULPI_VENDOR_SPECIFIC 0x30
-/* Extended Registers */
-#define ULPI_EXT_VENDOR_SPECIFIC 0x80
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Register Bits
- */
-
-/* Function Control */
-#define ULPI_FUNC_CTRL_XCVRSEL (1 << 0)
-#define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0)
-#define ULPI_FUNC_CTRL_HIGH_SPEED (0 << 0)
-#define ULPI_FUNC_CTRL_FULL_SPEED (1 << 0)
-#define ULPI_FUNC_CTRL_LOW_SPEED (2 << 0)
-#define ULPI_FUNC_CTRL_FS4LS (3 << 0)
-#define ULPI_FUNC_CTRL_TERMSELECT (1 << 2)
-#define ULPI_FUNC_CTRL_OPMODE (1 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_MASK (3 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (1 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (2 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (3 << 3)
-#define ULPI_FUNC_CTRL_RESET (1 << 5)
-#define ULPI_FUNC_CTRL_SUSPENDM (1 << 6)
-
-/* Interface Control */
-#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE (1 << 0)
-#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE (1 << 1)
-#define ULPI_IFC_CTRL_CARKITMODE (1 << 2)
-#define ULPI_IFC_CTRL_CLOCKSUSPENDM (1 << 3)
-#define ULPI_IFC_CTRL_AUTORESUME (1 << 4)
-#define ULPI_IFC_CTRL_EXTERNAL_VBUS (1 << 5)
-#define ULPI_IFC_CTRL_PASSTHRU (1 << 6)
-#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE (1 << 7)
-
-/* OTG Control */
-#define ULPI_OTG_CTRL_ID_PULLUP (1 << 0)
-#define ULPI_OTG_CTRL_DP_PULLDOWN (1 << 1)
-#define ULPI_OTG_CTRL_DM_PULLDOWN (1 << 2)
-#define ULPI_OTG_CTRL_DISCHRGVBUS (1 << 3)
-#define ULPI_OTG_CTRL_CHRGVBUS (1 << 4)
-#define ULPI_OTG_CTRL_DRVVBUS (1 << 5)
-#define ULPI_OTG_CTRL_DRVVBUS_EXT (1 << 6)
-#define ULPI_OTG_CTRL_EXTVBUSIND (1 << 7)
-
-/* USB Interrupt Enable Rising,
- * USB Interrupt Enable Falling,
- * USB Interrupt Status and
- * USB Interrupt Latch
- */
-#define ULPI_INT_HOST_DISCONNECT (1 << 0)
-#define ULPI_INT_VBUS_VALID (1 << 1)
-#define ULPI_INT_SESS_VALID (1 << 2)
-#define ULPI_INT_SESS_END (1 << 3)
-#define ULPI_INT_IDGRD (1 << 4)
-
-/* Debug */
-#define ULPI_DEBUG_LINESTATE0 (1 << 0)
-#define ULPI_DEBUG_LINESTATE1 (1 << 1)
-
-/* Carkit Control */
-#define ULPI_CARKIT_CTRL_CARKITPWR (1 << 0)
-#define ULPI_CARKIT_CTRL_IDGNDDRV (1 << 1)
-#define ULPI_CARKIT_CTRL_TXDEN (1 << 2)
-#define ULPI_CARKIT_CTRL_RXDEN (1 << 3)
-#define ULPI_CARKIT_CTRL_SPKLEFTEN (1 << 4)
-#define ULPI_CARKIT_CTRL_SPKRIGHTEN (1 << 5)
-#define ULPI_CARKIT_CTRL_MICEN (1 << 6)
-
-/* Carkit Interrupt Enable */
-#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE (1 << 0)
-#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL (1 << 1)
-#define ULPI_CARKIT_INT_EN_CARINTDET (1 << 2)
-#define ULPI_CARKIT_INT_EN_DP_RISE (1 << 3)
-#define ULPI_CARKIT_INT_EN_DP_FALL (1 << 4)
-
-/* Carkit Interrupt Status and
- * Carkit Interrupt Latch
- */
-#define ULPI_CARKIT_INT_IDFLOAT (1 << 0)
-#define ULPI_CARKIT_INT_CARINTDET (1 << 1)
-#define ULPI_CARKIT_INT_DP (1 << 2)
-
-/* Carkit Pulse Control*/
-#define ULPI_CARKIT_PLS_CTRL_TXPLSEN (1 << 0)
-#define ULPI_CARKIT_PLS_CTRL_RXPLSEN (1 << 1)
-#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2)
-#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3)
-
-/*-------------------------------------------------------------------------*/
-
#if IS_ENABLED(CONFIG_USB_ULPI)
struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags);
diff --git a/kernel/include/linux/usb/usb338x.h b/kernel/include/linux/usb/usb338x.h
index f92eb635b..11525d8d8 100644
--- a/kernel/include/linux/usb/usb338x.h
+++ b/kernel/include/linux/usb/usb338x.h
@@ -43,6 +43,10 @@
#define IN_ENDPOINT_TYPE 12
#define OUT_ENDPOINT_ENABLE 10
#define OUT_ENDPOINT_TYPE 8
+#define USB3380_EP_CFG_MASK_IN ((0x3 << IN_ENDPOINT_TYPE) | \
+ BIT(IN_ENDPOINT_ENABLE))
+#define USB3380_EP_CFG_MASK_OUT ((0x3 << OUT_ENDPOINT_TYPE) | \
+ BIT(OUT_ENDPOINT_ENABLE))
struct usb338x_usb_ext_regs {
u32 usbclass;
diff --git a/kernel/include/linux/userfaultfd_k.h b/kernel/include/linux/userfaultfd_k.h
new file mode 100644
index 000000000..587480ad4
--- /dev/null
+++ b/kernel/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
+/*
+ * include/linux/userfaultfd_k.h
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_K_H
+#define _LINUX_USERFAULTFD_K_H
+
+#ifdef CONFIG_USERFAULTFD
+
+#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
+
+#include <linux/fcntl.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from userfaultfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define UFFD_CLOEXEC O_CLOEXEC
+#define UFFD_NONBLOCK O_NONBLOCK
+
+#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+
+extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long reason);
+
+extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+ unsigned long src_start, unsigned long len);
+extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
+ unsigned long dst_start,
+ unsigned long len);
+
+/* mm helpers */
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_MISSING;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+}
+
+#else /* CONFIG_USERFAULTFD */
+
+/* mm helpers */
+static inline int handle_userfault(struct vm_area_struct *vma,
+ unsigned long address,
+ unsigned int flags,
+ unsigned long reason)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return true;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+#endif /* CONFIG_USERFAULTFD */
+
+#endif /* _LINUX_USERFAULTFD_K_H */
diff --git a/kernel/include/linux/verify_pefile.h b/kernel/include/linux/verify_pefile.h
index ac3481921..da2049b51 100644
--- a/kernel/include/linux/verify_pefile.h
+++ b/kernel/include/linux/verify_pefile.h
@@ -12,7 +12,11 @@
#ifndef _LINUX_VERIFY_PEFILE_H
#define _LINUX_VERIFY_PEFILE_H
+#include <crypto/public_key.h>
+
extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
- struct key *trusted_keyring, bool *_trusted);
+ struct key *trusted_keyring,
+ enum key_being_used_for usage,
+ bool *_trusted);
#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/kernel/include/linux/vga_switcheroo.h b/kernel/include/linux/vga_switcheroo.h
index b483abd34..69e1d4a1f 100644
--- a/kernel/include/linux/vga_switcheroo.h
+++ b/kernel/include/linux/vga_switcheroo.h
@@ -1,10 +1,31 @@
/*
+ * vga_switcheroo.h - Support for laptops with dual GPUs using one set of outputs
+ *
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
- * Licensed under GPLv2
+ * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*
- * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
*/
#ifndef _LINUX_VGA_SWITCHEROO_H_
@@ -14,28 +35,85 @@
struct pci_dev;
+/**
+ * enum vga_switcheroo_state - client power state
+ * @VGA_SWITCHEROO_OFF: off
+ * @VGA_SWITCHEROO_ON: on
+ * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo.
+ * Only used in vga_switcheroo_get_client_state() which in turn is only
+ * called from hda_intel.c
+ *
+ * Client power state.
+ */
enum vga_switcheroo_state {
VGA_SWITCHEROO_OFF,
VGA_SWITCHEROO_ON,
/* below are referred only from vga_switcheroo_get_client_state() */
- VGA_SWITCHEROO_INIT,
VGA_SWITCHEROO_NOT_FOUND,
};
+/**
+ * enum vga_switcheroo_client_id - client identifier
+ * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients.
+ * Determining the id requires the handler, so GPUs are given their
+ * true id in a delayed fashion in vga_switcheroo_enable()
+ * @VGA_SWITCHEROO_IGD: integrated graphics device
+ * @VGA_SWITCHEROO_DIS: discrete graphics device
+ * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
+ *
+ * Client identifier. Audio clients use the video client's identifier ORed with 0x100.
+ */
enum vga_switcheroo_client_id {
+ VGA_SWITCHEROO_UNKNOWN_ID = -1,
VGA_SWITCHEROO_IGD,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
+/**
+ * struct vga_switcheroo_handler - handler callbacks
+ * @init: initialize handler.
+ * Optional. This gets called when vga_switcheroo is enabled, i.e. when
+ * two vga clients have registered. It allows the handler to perform
+ * some delayed initialization that depends on the existence of the
+ * vga clients. Currently only the radeon and amdgpu drivers use this.
+ * The return value is ignored
+ * @switchto: switch outputs to given client.
+ * Mandatory. For muxless machines this should be a no-op. Returning 0
+ * denotes success, anything else failure (in which case the switch is
+ * aborted)
+ * @power_state: cut or reinstate power of given client.
+ * Optional. The return value is ignored
+ * @get_client_id: determine if given pci device is integrated or discrete GPU.
+ * Mandatory
+ *
+ * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id
+ * methods are mandatory, all others may be set to NULL.
+ */
struct vga_switcheroo_handler {
+ int (*init)(void);
int (*switchto)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
- int (*init)(void);
- int (*get_client_id)(struct pci_dev *pdev);
+ enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
};
+/**
+ * struct vga_switcheroo_client_ops - client callbacks
+ * @set_gpu_state: do the equivalent of suspend/resume for the card.
+ * Mandatory. This should not cut power to the discrete GPU,
+ * which is the job of the handler
+ * @reprobe: poll outputs.
+ * Optional. This gets called after waking the GPU and switching
+ * the outputs to it
+ * @can_switch: check if the device is in a position to switch now.
+ * Mandatory. The client should return false if a user space process
+ * has one of its device files open
+ *
+ * Client callbacks. A client can be either a GPU or an audio device on a GPU.
+ * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
+ * set to NULL. For audio clients, the @reprobe member is bogus.
+ */
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
@@ -49,17 +127,17 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- int id, bool active);
+ enum vga_switcheroo_client_id id);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
-int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler);
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
void vga_switcheroo_unregister_handler(void);
int vga_switcheroo_process_delayed_switch(void);
-int vga_switcheroo_get_client_state(struct pci_dev *dev);
+enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -72,13 +150,13 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
-static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- int id, bool active) { return 0; }
+ enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
-static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
diff --git a/kernel/include/linux/virtio_byteorder.h b/kernel/include/linux/virtio_byteorder.h
index 51865d05b..ce63a2c3a 100644
--- a/kernel/include/linux/virtio_byteorder.h
+++ b/kernel/include/linux/virtio_byteorder.h
@@ -3,17 +3,21 @@
#include <linux/types.h>
#include <uapi/linux/virtio_types.h>
-/*
- * Low-level memory accessors for handling virtio in modern little endian and in
- * compatibility native endian format.
- */
+static inline bool virtio_legacy_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
{
if (little_endian)
return le16_to_cpu((__force __le16)val);
else
- return (__force u16)val;
+ return be16_to_cpu((__force __be16)val);
}
static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
@@ -21,7 +25,7 @@ static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
if (little_endian)
return (__force __virtio16)cpu_to_le16(val);
else
- return (__force __virtio16)val;
+ return (__force __virtio16)cpu_to_be16(val);
}
static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
@@ -29,7 +33,7 @@ static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
if (little_endian)
return le32_to_cpu((__force __le32)val);
else
- return (__force u32)val;
+ return be32_to_cpu((__force __be32)val);
}
static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
@@ -37,7 +41,7 @@ static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
if (little_endian)
return (__force __virtio32)cpu_to_le32(val);
else
- return (__force __virtio32)val;
+ return (__force __virtio32)cpu_to_be32(val);
}
static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
@@ -45,7 +49,7 @@ static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
if (little_endian)
return le64_to_cpu((__force __le64)val);
else
- return (__force u64)val;
+ return be64_to_cpu((__force __be64)val);
}
static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
@@ -53,7 +57,7 @@ static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
if (little_endian)
return (__force __virtio64)cpu_to_le64(val);
else
- return (__force __virtio64)val;
+ return (__force __virtio64)cpu_to_be64(val);
}
#endif /* _LINUX_VIRTIO_BYTEORDER */
diff --git a/kernel/include/linux/virtio_config.h b/kernel/include/linux/virtio_config.h
index 1e306f727..e5ce8ab0b 100644
--- a/kernel/include/linux/virtio_config.h
+++ b/kernel/include/linux/virtio_config.h
@@ -205,35 +205,41 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
}
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+ return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+ virtio_legacy_is_little_endian();
+}
+
/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
- return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
- return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}
static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
- return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
- return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}
static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
- return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
- return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
/* Config space accessors. */
diff --git a/kernel/include/linux/vm_event_item.h b/kernel/include/linux/vm_event_item.h
index 9246d32dc..e623d392d 100644
--- a/kernel/include/linux/vm_event_item.h
+++ b/kernel/include/linux/vm_event_item.h
@@ -14,12 +14,12 @@
#endif
#ifdef CONFIG_HIGHMEM
-#define HIGHMEM_ZONE(xx) , xx##_HIGH
+#define HIGHMEM_ZONE(xx) xx##_HIGH,
#else
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
diff --git a/kernel/include/linux/vmalloc.h b/kernel/include/linux/vmalloc.h
index 0ec598381..3bff87a25 100644
--- a/kernel/include/linux/vmalloc.h
+++ b/kernel/include/linux/vmalloc.h
@@ -182,22 +182,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
# endif
#endif
-struct vmalloc_info {
- unsigned long used;
- unsigned long largest_chunk;
-};
-
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-extern void get_vmalloc_info(struct vmalloc_info *vmi);
#else
-
#define VMALLOC_TOTAL 0UL
-#define get_vmalloc_info(vmi) \
-do { \
- (vmi)->used = 0; \
- (vmi)->largest_chunk = 0; \
-} while (0)
#endif
#endif /* _LINUX_VMALLOC_H */
diff --git a/kernel/include/linux/vme.h b/kernel/include/linux/vme.h
index 79242e9c0..71e4a6dec 100644
--- a/kernel/include/linux/vme.h
+++ b/kernel/include/linux/vme.h
@@ -81,6 +81,9 @@ struct vme_resource {
extern struct bus_type vme_bus_type;
+/* Number of VME interrupt vectors */
+#define VME_NUM_STATUSID 256
+
/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
#define VME_MAX_SLOTS 32
@@ -120,6 +123,8 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
dma_addr_t);
size_t vme_get_size(struct vme_resource *);
+int vme_check_window(u32 aspace, unsigned long long vme_base,
+ unsigned long long size);
struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
diff --git a/kernel/include/linux/vmstat.h b/kernel/include/linux/vmstat.h
index 3feaf770a..7eaa847cd 100644
--- a/kernel/include/linux/vmstat.h
+++ b/kernel/include/linux/vmstat.h
@@ -165,30 +165,8 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
}
#ifdef CONFIG_NUMA
-/*
- * Determine the per node value of a stat item. This function
- * is called frequently in a NUMA machine, so try to be as
- * frugal as possible.
- */
-static inline unsigned long node_page_state(int node,
- enum zone_stat_item item)
-{
- struct zone *zones = NODE_DATA(node)->node_zones;
-
- return
-#ifdef CONFIG_ZONE_DMA
- zone_page_state(&zones[ZONE_DMA], item) +
-#endif
-#ifdef CONFIG_ZONE_DMA32
- zone_page_state(&zones[ZONE_DMA32], item) +
-#endif
-#ifdef CONFIG_HIGHMEM
- zone_page_state(&zones[ZONE_HIGHMEM], item) +
-#endif
- zone_page_state(&zones[ZONE_NORMAL], item) +
- zone_page_state(&zones[ZONE_MOVABLE], item);
-}
+extern unsigned long node_page_state(int node, enum zone_stat_item item);
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#else
@@ -202,11 +180,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#ifdef CONFIG_SMP
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);
@@ -231,7 +209,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
* The functions directly modify the zone and global counters.
*/
static inline void __mod_zone_page_state(struct zone *zone,
- enum zone_stat_item item, int delta)
+ enum zone_stat_item item, long delta)
{
zone_page_state_add(delta, zone, item);
}
@@ -273,7 +251,6 @@ static inline void __dec_zone_page_state(struct page *page,
#define set_pgdat_percpu_threshold(pgdat, callback) { }
-static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
diff --git a/kernel/include/linux/vringh.h b/kernel/include/linux/vringh.h
index a3fa537e7..bc6c28d04 100644
--- a/kernel/include/linux/vringh.h
+++ b/kernel/include/linux/vringh.h
@@ -226,33 +226,39 @@ static inline void vringh_notify(struct vringh *vrh)
vrh->notify(vrh);
}
+static inline bool vringh_is_little_endian(const struct vringh *vrh)
+{
+ return vrh->little_endian ||
+ virtio_legacy_is_little_endian();
+}
+
static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
- return __virtio16_to_cpu(vrh->little_endian, val);
+ return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}
static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
- return __cpu_to_virtio16(vrh->little_endian, val);
+ return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}
static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
- return __virtio32_to_cpu(vrh->little_endian, val);
+ return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}
static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
- return __cpu_to_virtio32(vrh->little_endian, val);
+ return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}
static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
- return __virtio64_to_cpu(vrh->little_endian, val);
+ return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}
static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
- return __cpu_to_virtio64(vrh->little_endian, val);
+ return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
#endif /* _LINUX_VRINGH_H */
diff --git a/kernel/include/linux/wait-simple.h b/kernel/include/linux/wait-simple.h
deleted file mode 100644
index f86bca2c4..000000000
--- a/kernel/include/linux/wait-simple.h
+++ /dev/null
@@ -1,207 +0,0 @@
-#ifndef _LINUX_WAIT_SIMPLE_H
-#define _LINUX_WAIT_SIMPLE_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#include <asm/current.h>
-
-struct swaiter {
- struct task_struct *task;
- struct list_head node;
-};
-
-#define DEFINE_SWAITER(name) \
- struct swaiter name = { \
- .task = current, \
- .node = LIST_HEAD_INIT((name).node), \
- }
-
-struct swait_head {
- raw_spinlock_t lock;
- struct list_head list;
-};
-
-#define SWAIT_HEAD_INITIALIZER(name) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .list = LIST_HEAD_INIT((name).list), \
- }
-
-#define DEFINE_SWAIT_HEAD(name) \
- struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
-
-extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
-
-#define init_swait_head(swh) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_swait_head((swh), &__key); \
- } while (0)
-
-/*
- * Waiter functions
- */
-extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
-extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_finish(struct swait_head *head, struct swaiter *w);
-
-/* Check whether a head has waiters enqueued */
-static inline bool swaitqueue_active(struct swait_head *h)
-{
- /* Make sure the condition is visible before checking list_empty() */
- smp_mb();
- return !list_empty(&h->list);
-}
-
-/*
- * Wakeup functions
- */
-extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
-extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
-
-#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
-#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
-#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
-#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
-
-/*
- * Event API
- */
-#define __swait_event(wq, condition) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event(wq, condition) \
-do { \
- if (condition) \
- break; \
- __swait_event(wq, condition); \
-} while (0)
-
-#define __swait_event_interruptible(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-#define __swait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event_interruptible(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __swait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
-
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- int __ret = timeout; \
- if (!(condition)) \
- __swait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#define __swait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
- */
-#define swait_event_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __swait_event_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#endif
diff --git a/kernel/include/linux/wait.h b/kernel/include/linux/wait.h
index b3b54c26b..981c8a840 100644
--- a/kernel/include/linux/wait.h
+++ b/kernel/include/linux/wait.h
@@ -146,7 +146,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
list_del(&old->task_list);
}
-typedef int wait_bit_action_f(struct wait_bit_key *);
+typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -359,6 +359,19 @@ do { \
__ret; \
})
+#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
+ cmd1; schedule(); cmd2)
+/*
+ * Just like wait_event_cmd(), except it sets the exclusive flag
+ */
+#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
+} while (0)
+
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
cmd1; schedule(); cmd2)
@@ -948,10 +961,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
} while (0)
-extern int bit_wait(struct wait_bit_key *);
-extern int bit_wait_io(struct wait_bit_key *);
-extern int bit_wait_timeout(struct wait_bit_key *);
-extern int bit_wait_io_timeout(struct wait_bit_key *);
+extern int bit_wait(struct wait_bit_key *, int);
+extern int bit_wait_io(struct wait_bit_key *, int);
+extern int bit_wait_timeout(struct wait_bit_key *, int);
+extern int bit_wait_io_timeout(struct wait_bit_key *, int);
/**
* wait_on_bit - wait for a bit to be cleared
@@ -970,7 +983,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
* on that signal.
*/
static inline int
-wait_on_bit(void *word, int bit, unsigned mode)
+wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -995,7 +1008,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
* on that signal.
*/
static inline int
-wait_on_bit_io(void *word, int bit, unsigned mode)
+wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1021,7 +1034,8 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
* received a signal and the mode permitted wakeup on that signal.
*/
static inline int
-wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
+ unsigned long timeout)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1048,7 +1062,8 @@ wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
* on that signal.
*/
static inline int
-wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
+ unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1076,7 +1091,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock(void *word, int bit, unsigned mode)
+wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
@@ -1100,7 +1115,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
@@ -1126,7 +1141,8 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
+ unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
diff --git a/kernel/include/linux/watchdog.h b/kernel/include/linux/watchdog.h
index a746bf521..027b1f43f 100644
--- a/kernel/include/linux/watchdog.h
+++ b/kernel/include/linux/watchdog.h
@@ -24,8 +24,8 @@ struct watchdog_device;
* @stop: The routine for stopping the watchdog device.
* @ping: The routine that sends a keepalive ping to the watchdog device.
* @status: The routine that shows the status of the watchdog device.
- * @set_timeout:The routine for setting the watchdog devices timeout value.
- * @get_timeleft:The routine that get's the time that's left before a reset.
+ * @set_timeout:The routine for setting the watchdog devices timeout value (in seconds).
+ * @get_timeleft:The routine that gets the time left before a reset (in seconds).
* @ref: The ref operation for dyn. allocated watchdog_device structs
* @unref: The unref operation for dyn. allocated watchdog_device structs
* @ioctl: The routines that handles extra ioctl calls.
@@ -33,7 +33,7 @@ struct watchdog_device;
* The watchdog_ops structure contains a list of low-level operations
* that control a watchdog device. It also contains the module that owns
* these operations. The start and stop function are mandatory, all other
- * functions are optonal.
+ * functions are optional.
*/
struct watchdog_ops {
struct module *owner;
@@ -59,12 +59,14 @@ struct watchdog_ops {
* @info: Pointer to a watchdog_info structure.
* @ops: Pointer to the list of watchdog operations.
* @bootstatus: Status of the watchdog device at boot.
- * @timeout: The watchdog devices timeout value.
- * @min_timeout:The watchdog devices minimum timeout value.
- * @max_timeout:The watchdog devices maximum timeout value.
+ * @timeout: The watchdog devices timeout value (in seconds).
+ * @min_timeout:The watchdog devices minimum timeout value (in seconds).
+ * @max_timeout:The watchdog devices maximum timeout value (in seconds).
* @driver-data:Pointer to the drivers private data.
* @lock: Lock for watchdog core internal use only.
* @status: Field that contains the devices internal status bits.
+ * @deferred: entry in wtd_deferred_reg_list which is used to
+ * register early initialized watchdogs.
*
* The watchdog_device structure contains all information about a
* watchdog timer device.
@@ -95,6 +97,7 @@ struct watchdog_device {
#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */
#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
+ struct list_head deferred;
};
#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
@@ -116,8 +119,15 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway
/* Use the following function to check if a timeout value is invalid */
static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
{
- return ((wdd->max_timeout != 0) &&
- (t < wdd->min_timeout || t > wdd->max_timeout));
+ /*
+ * The timeout is invalid if
+ * - the requested value is smaller than the configured minimum timeout,
+ * or
+ * - a maximum timeout is configured, and the requested value is larger
+ * than the maximum timeout.
+ */
+ return t < wdd->min_timeout ||
+ (wdd->max_timeout && t > wdd->max_timeout);
}
/* Use the following functions to manipulate watchdog driver specific data */
@@ -137,12 +147,4 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void watchdog_nmi_disable_all(void);
-void watchdog_nmi_enable_all(void);
-#else
-static inline void watchdog_nmi_disable_all(void) {}
-static inline void watchdog_nmi_enable_all(void) {}
-#endif
-
#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/kernel/include/linux/workqueue.h b/kernel/include/linux/workqueue.h
index deee212af..0197358f1 100644
--- a/kernel/include/linux/workqueue.h
+++ b/kernel/include/linux/workqueue.h
@@ -265,7 +265,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
/**
* delayed_work_pending - Find out whether a delayable work item is currently
* pending
- * @work: The work item in question
+ * @w: The work item in question
*/
#define delayed_work_pending(w) \
work_pending(&(w)->work)
@@ -366,7 +366,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
- * @args: args for @fmt
+ * @args...: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
* information on WQ_* flags, please refer to Documentation/workqueue.txt.
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args: args for @fmt
+ * @args...: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
@@ -424,6 +424,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
+int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
@@ -434,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
-extern void flush_scheduled_work(void);
extern int schedule_on_each_cpu(work_func_t func);
@@ -531,6 +531,35 @@ static inline bool schedule_work(struct work_struct *work)
}
/**
+ * flush_scheduled_work - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the kernel-global workqueue and blocks until its
+ * completion.
+ *
+ * Think twice before calling this function! It's very easy to get into
+ * trouble if you don't take great care. Either of the following situations
+ * will lead to deadlock:
+ *
+ * One of the work items currently on the workqueue needs to acquire
+ * a lock held by your code or its caller.
+ *
+ * Your code is running in the context of a work routine.
+ *
+ * They will be detected by lockdep when they occur, but the first might not
+ * occur very often. It depends on what work items are on the workqueue and
+ * what locks they need, which you have no control over.
+ *
+ * In most situations flushing the entire workqueue is overkill; you merely
+ * need to know that a particular work item isn't queued and isn't running.
+ * In such cases you should use cancel_delayed_work_sync() or
+ * cancel_work_sync() instead.
+ */
+static inline void flush_scheduled_work(void)
+{
+ flush_workqueue(system_wq);
+}
+
+/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done
diff --git a/kernel/include/linux/writeback.h b/kernel/include/linux/writeback.h
index b2dd371ec..d0b5ca5d4 100644
--- a/kernel/include/linux/writeback.h
+++ b/kernel/include/linux/writeback.h
@@ -7,6 +7,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
+#include <linux/flex_proportions.h>
+#include <linux/backing-dev-defs.h>
DECLARE_PER_CPU(int, dirty_throttle_leaks);
@@ -84,8 +86,85 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct bdi_writeback *wb; /* wb this writeback is issued under */
+ struct inode *inode; /* inode being written out */
+
+ /* foreign inode detection, see wbc_detach_inode() */
+ int wb_id; /* current wb id */
+ int wb_lcand_id; /* last foreign candidate wb id */
+ int wb_tcand_id; /* this foreign candidate wb id */
+ size_t wb_bytes; /* bytes written by current wb */
+ size_t wb_lcand_bytes; /* bytes written by last candidate */
+ size_t wb_tcand_bytes; /* bytes written by this candidate */
+#endif
+};
+
+/*
+ * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
+ * and are measured against each other in. There always is one global
+ * domain, global_wb_domain, that every wb in the system is a member of.
+ * This allows measuring the relative bandwidth of each wb to distribute
+ * dirtyable memory accordingly.
+ */
+struct wb_domain {
+ spinlock_t lock;
+
+ /*
+ * Scale the writeback cache size proportional to the relative
+ * writeout speed.
+ *
+ * We do this by keeping a floating proportion between BDIs, based
+ * on page writeback completions [end_page_writeback()]. Those
+ * devices that write out pages fastest will get the larger share,
+ * while the slower will get a smaller share.
+ *
+ * We use page writeout completions because we are interested in
+ * getting rid of dirty pages. Having them written out is the
+ * primary goal.
+ *
+ * We introduce a concept of time, a period over which we measure
+ * these events, because demand can/will vary over time. The length
+ * of this period itself is measured in page writeback completions.
+ */
+ struct fprop_global completions;
+ struct timer_list period_timer; /* timer for aging of completions */
+ unsigned long period_time;
+
+ /*
+ * The dirtyable memory and dirty threshold could be suddenly
+ * knocked down by a large amount (e.g. on the startup of KVM in a
+ * swapless system). This may throw the system into deep dirty
+ * exceeded state and throttle heavy/light dirtiers alike. To
+ * retain good responsiveness, maintain global_dirty_limit for
+ * tracking slowly down to the knocked down dirty threshold.
+ *
+ * Both fields are protected by ->lock.
+ */
+ unsigned long dirty_limit_tstamp;
+ unsigned long dirty_limit;
};
+/**
+ * wb_domain_size_changed - memory available to a wb_domain has changed
+ * @dom: wb_domain of interest
+ *
+ * This function should be called when the amount of memory available to
+ * @dom has changed. It resets @dom's dirty limit parameters to prevent
+ * the past values which don't match the current configuration from skewing
+ * dirty throttling. Without this, when memory size of a wb_domain is
+ * greatly reduced, the dirty throttling logic may allow too many pages to
+ * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
+ * that situation.
+ */
+static inline void wb_domain_size_changed(struct wb_domain *dom)
+{
+ spin_lock(&dom->lock);
+ dom->dirty_limit_tstamp = jiffies;
+ dom->dirty_limit = 0;
+ spin_unlock(&dom->lock);
+}
+
/*
* fs/fs-writeback.c
*/
@@ -93,9 +172,9 @@ struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
-int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
- enum wb_reason reason);
+bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+ enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
@@ -107,6 +186,128 @@ static inline void wait_on_inode(struct inode *inode)
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+#include <linux/cgroup.h>
+#include <linux/bio.h>
+
+void __inode_attach_wb(struct inode *inode, struct page *page);
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock);
+void wbc_detach_inode(struct writeback_control *wbc);
+void wbc_account_io(struct writeback_control *wbc, struct page *page,
+ size_t bytes);
+void cgroup_writeback_umount(void);
+
+/**
+ * inode_attach_wb - associate an inode with its wb
+ * @inode: inode of interest
+ * @page: page being dirtied (may be NULL)
+ *
+ * If @inode doesn't have its wb, associate it with the wb matching the
+ * memcg of @page or, if @page is NULL, %current. May be called with or
+ * without @inode->i_lock held.
+ */
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+ if (!inode->i_wb)
+ __inode_attach_wb(inode, page);
+}
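
/*
 * Sketch of the intended call site (the helper is hypothetical): a
 * dirtying path makes sure the inode has a wb before charging the page.
 */
static void example_account_dirtied(struct inode *inode, struct page *page)
{
	inode_attach_wb(inode, page);	/* no-op if inode->i_wb is set */
	/* ... dirty accounting against inode->i_wb goes here ... */
}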
+
+/**
+ * inode_detach_wb - disassociate an inode from its wb
+ * @inode: inode of interest
+ *
+ * @inode is being freed. Detach from its wb.
+ */
+static inline void inode_detach_wb(struct inode *inode)
+{
+ if (inode->i_wb) {
+ wb_put(inode->i_wb);
+ inode->i_wb = NULL;
+ }
+}
+
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by __filemap_fdatawrite_range(), which is
+ * an alternative entry point into the writeback code. It first ensures
+ * that @inode is associated with a bdi_writeback and then attaches that
+ * wb to @wbc.
+ */
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ inode_attach_wb(inode, NULL);
+ wbc_attach_and_unlock_inode(wbc, inode);
+}
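
/*
 * Sketch of the fdatawrite pattern described above; the wrapper is
 * hypothetical, do_writepages() is the existing writeback entry point.
 */
static int example_fdatawrite(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	int ret;

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
	wbc_detach_inode(wbc);
	return ret;
}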
+
+/**
+ * wbc_init_bio - writeback-specific initialization of a bio
+ * @wbc: writeback_control for the writeback in progress
+ * @bio: bio to be initialized
+ *
+ * @bio is part of the writeback in progress controlled by @wbc. Perform
+ * writeback-specific initialization. This is used to apply the cgroup
+ * writeback context.
+ */
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+ /*
+ * pageout() path doesn't attach @wbc to the inode being written
+ * out. This is intentional as we don't want the function to block
+ * behind a slow cgroup. Ultimately, we want pageout() to kick off
+ * regular writeback instead of writing things out itself.
+ */
+ if (wbc->wb)
+ bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+}
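
/*
 * Sketch (hypothetical helper, assuming this tree's two-argument
 * submit_bio(int rw, struct bio *)): each bio issued for writeback is
 * first passed through wbc_init_bio() so the I/O inherits the cgroup
 * context, then the written bytes are attributed via wbc_account_io().
 */
static void example_submit_wb_page(struct writeback_control *wbc,
				   struct bio *bio, struct page *page)
{
	wbc_init_bio(wbc, bio);
	wbc_account_io(wbc, page, PAGE_SIZE);
	submit_bio(WRITE, bio);
}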
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+}
+
+static inline void inode_detach_wb(struct inode *inode)
+{
+}
+
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock)
+{
+ spin_unlock(&inode->i_lock);
+}
+
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+}
+
+static inline void wbc_detach_inode(struct writeback_control *wbc)
+{
+}
+
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+}
+
+static inline void wbc_account_io(struct writeback_control *wbc,
+ struct page *page, size_t bytes)
+{
+}
+
+static inline void cgroup_writeback_umount(void)
+{
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
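/*
 * Because the !CONFIG_CGROUP_WRITEBACK stubs above are empty inlines,
 * callers can use the wbc_* helpers unconditionally; e.g. this
 * hypothetical sketch compiles identically in both configurations.
 */
static inline void example_wbc_done(struct writeback_control *wbc)
{
	wbc_detach_inode(wbc);	/* no-op without cgroup writeback */
}
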
/*
* mm/page-writeback.c
*/
@@ -120,8 +321,12 @@ static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
+int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
+#ifdef CONFIG_CGROUP_WRITEBACK
+void wb_domain_exit(struct wb_domain *dom);
+#endif
-extern unsigned long global_dirty_limit;
+extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern int dirty_background_ratio;
@@ -155,19 +360,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
-unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
- unsigned long dirty);
-
-void __bdi_update_bandwidth(struct backing_dev_info *bdi,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long bdi_thresh,
- unsigned long bdi_dirty,
- unsigned long start_time);
+unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
+void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
+bool wb_over_bg_thresh(struct bdi_writeback *wb);
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
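
/*
 * Sketch of how a flusher might consume the new predicate; the helper
 * and loop body are illustrative only.
 */
static void example_background_flush(struct bdi_writeback *wb)
{
	while (wb_over_bg_thresh(wb)) {
		/* ... write out a batch of pages owned by @wb ... */
	}
}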
diff --git a/kernel/include/linux/xattr.h b/kernel/include/linux/xattr.h
index 91b0a68d3..89474b9d2 100644
--- a/kernel/include/linux/xattr.h
+++ b/kernel/include/linux/xattr.h
@@ -21,15 +21,19 @@ struct dentry;
struct xattr_handler {
const char *prefix;
- int flags; /* fs private flags passed back to the handlers */
- size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
- const char *name, size_t name_len, int handler_flags);
- int (*get)(struct dentry *dentry, const char *name, void *buffer,
- size_t size, int handler_flags);
- int (*set)(struct dentry *dentry, const char *name, const void *buffer,
- size_t size, int flags, int handler_flags);
+ int flags; /* fs private flags */
+ size_t (*list)(const struct xattr_handler *, struct dentry *dentry,
+ char *list, size_t list_size, const char *name,
+ size_t name_len);
+ int (*get)(const struct xattr_handler *, struct dentry *dentry,
+ const char *name, void *buffer, size_t size);
+ int (*set)(const struct xattr_handler *, struct dentry *dentry,
+ const char *name, const void *buffer, size_t size,
+ int flags);
};
+const char *xattr_full_name(const struct xattr_handler *, const char *);
+
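/*
 * Hypothetical handler using the new prototypes; everything with an
 * "example" prefix is illustrative, not part of the patch.
 */
static int example_xattr_get(const struct xattr_handler *handler,
			     struct dentry *dentry, const char *name,
			     void *buffer, size_t size)
{
	/* @name is the suffix; xattr_full_name() can rebuild the full name */
	return -EOPNOTSUPP;
}

static const struct xattr_handler example_xattr_handler = {
	.prefix	= "user.example.",
	.get	= example_xattr_get,
};
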
struct xattr {
const char *name;
void *value;
diff --git a/kernel/include/linux/zbud.h b/kernel/include/linux/zbud.h
index f9d41a6e3..e183a0a65 100644
--- a/kernel/include/linux/zbud.h
+++ b/kernel/include/linux/zbud.h
@@ -9,7 +9,7 @@ struct zbud_ops {
int (*evict)(struct zbud_pool *pool, unsigned long handle);
};
-struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
void zbud_destroy_pool(struct zbud_pool *pool);
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
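
/*
 * Sketch against the constified API; the ops instance can now live in
 * rodata. All "example" names are illustrative.
 */
static int example_zbud_evict(struct zbud_pool *pool, unsigned long handle)
{
	return -EINVAL;		/* this sketch declines to evict */
}

static const struct zbud_ops example_zbud_ops = {
	.evict = example_zbud_evict,
};

static struct zbud_pool *example_zbud_pool(void)
{
	return zbud_create_pool(GFP_KERNEL, &example_zbud_ops);
}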
diff --git a/kernel/include/linux/zpool.h b/kernel/include/linux/zpool.h
index 56529b34d..2e97b7707 100644
--- a/kernel/include/linux/zpool.h
+++ b/kernel/include/linux/zpool.h
@@ -36,10 +36,12 @@ enum zpool_mapmode {
ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};
-struct zpool *zpool_create_pool(char *type, char *name,
- gfp_t gfp, struct zpool_ops *ops);
+bool zpool_has_pool(char *type);
-char *zpool_get_type(struct zpool *pool);
+struct zpool *zpool_create_pool(const char *type, const char *name,
+ gfp_t gfp, const struct zpool_ops *ops);
+
+const char *zpool_get_type(struct zpool *pool);
void zpool_destroy_pool(struct zpool *pool);
@@ -81,7 +83,10 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops);
+ void *(*create)(const char *name,
+ gfp_t gfp,
+ const struct zpool_ops *ops,
+ struct zpool *zpool);
void (*destroy)(void *pool);
int (*malloc)(void *pool, size_t size, gfp_t gfp,
@@ -102,6 +107,4 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
-int zpool_evict(void *pool, unsigned long handle);
-
#endif
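
/*
 * Sketch of the updated creation path: probe for a backend with
 * zpool_has_pool() before creating. Passing NULL ops assumes the caller
 * needs no eviction callback; the type and pool names are illustrative.
 */
static struct zpool *example_zpool(void)
{
	if (!zpool_has_pool("zbud"))
		return NULL;
	return zpool_create_pool("zbud", "example", GFP_KERNEL, NULL);
}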
diff --git a/kernel/include/linux/zsmalloc.h b/kernel/include/linux/zsmalloc.h
index 1338190b5..34eb16098 100644
--- a/kernel/include/linux/zsmalloc.h
+++ b/kernel/include/linux/zsmalloc.h
@@ -34,9 +34,14 @@ enum zs_mapmode {
*/
};
+struct zs_pool_stats {
+ /* How many pages were migrated (freed) */
+ unsigned long pages_compacted;
+};
+
struct zs_pool;
-struct zs_pool *zs_create_pool(char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size);
@@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
#endif
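
/*
 * Sketch: compact a pool and report the cumulative counter added by
 * this patch; the wrapper is hypothetical.
 */
static void example_zs_compact(struct zs_pool *pool)
{
	struct zs_pool_stats stats;

	zs_compact(pool);
	zs_pool_stats(pool, &stats);
	pr_info("zsmalloc: %lu pages compacted\n", stats.pages_compacted);
}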
diff --git a/kernel/include/linux/zutil.h b/kernel/include/linux/zutil.h
index 6adfa9a6f..663689521 100644
--- a/kernel/include/linux/zutil.h
+++ b/kernel/include/linux/zutil.h
@@ -68,10 +68,10 @@ typedef uLong (*check_func) (uLong check, const Byte *buf,
An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
much faster. Usage example:
- uLong adler = adler32(0L, NULL, 0);
+ uLong adler = zlib_adler32(0L, NULL, 0);
while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
+ adler = zlib_adler32(adler, buffer, length);
}
if (adler != original_adler) error();
*/
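
/*
 * Self-contained sketch of the renamed helper, following the usage
 * example above; the wrapper and its arguments are illustrative.
 */
static bool example_adler_matches(const unsigned char *buf, unsigned int len,
				  unsigned long expect)
{
	unsigned long adler = zlib_adler32(0L, NULL, 0);

	adler = zlib_adler32(adler, buf, len);
	return adler == expect;
}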