/linux-6.14.4/drivers/staging/greybus/ |
D | loopback.c |
    53   struct gb_loopback *gb; member
    108  #define GB_LOOPBACK_TIMEOUT_MIN 1
    127  struct gb_loopback *gb = dev_get_drvdata(dev); \
    128  return sprintf(buf, "%u\n", gb->field); \
    137  struct gb_loopback *gb = dev_get_drvdata(dev); \
    139  if (!gb->requests_completed) \
    141  return sprintf(buf, "%" #type "\n", gb->name.field); \
    151  struct gb_loopback *gb; \
    154  gb = dev_get_drvdata(dev); \
    155  stats = &gb->name; \
    [all …]
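The loopback.c hits above are fragments of macro-generated sysfs "show" callbacks: each attribute macro looks up the per-device struct gb_loopback with dev_get_drvdata() and prints a single member, using the preprocessor to stamp out one function per field. A minimal user-space sketch of that stringify/token-pasting pattern (hypothetical struct and field names, not the Greybus code itself):

    #include <stdio.h>

    struct gb_loopback_example {            /* hypothetical stand-in for struct gb_loopback */
            unsigned int timeout;
            unsigned int iteration_max;
    };

    /* Each expansion emits one show_<field>() helper, like the sysfs macros above. */
    #define gb_loopback_show_attr(field)                                    \
    static void show_##field(const struct gb_loopback_example *gb)          \
    {                                                                       \
            printf("%s: %u\n", #field, gb->field);                          \
    }

    gb_loopback_show_attr(timeout)
    gb_loopback_show_attr(iteration_max)

    int main(void)
    {
            struct gb_loopback_example gb = { .timeout = 1000, .iteration_max = 100 };

            show_timeout(&gb);
            show_iteration_max(&gb);
            return 0;
    }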
|
D | audio_topology.c |
    133  static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb, in gb_generate_enum_strings() argument
    142  strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL); in gb_generate_enum_strings()
    192  if (uinfo->value.enumerated.item > max - 1) in gbcodec_mixer_ctl_info()
    193  uinfo->value.enumerated.item = max - 1; in gbcodec_mixer_ctl_info()
    218  struct gbaudio_codec_info *gb = snd_soc_component_get_drvdata(comp); in gbcodec_mixer_ctl_get() local
    222  module = find_gb_module(gb, kcontrol->id.name); in gbcodec_mixer_ctl_get()
    252  ucontrol->value.integer.value[1] = in gbcodec_mixer_ctl_get()
    253  le32_to_cpu(gbvalue.value.integer_value[1]); in gbcodec_mixer_ctl_get()
    259  ucontrol->value.enumerated.item[1] = in gbcodec_mixer_ctl_get()
    260  le32_to_cpu(gbvalue.value.enumerated_item[1]); in gbcodec_mixer_ctl_get()
    [all …]
|
/linux-6.14.4/drivers/net/ethernet/intel/ice/ |
D | ice_ethtool.h |
    72   [1] = ICE_PHY_TYPE(100MB, 100baseT_Full),
    81   [10] = ICE_PHY_TYPE(5GB, 5000baseT_Full),
    82   [11] = ICE_PHY_TYPE(5GB, 5000baseT_Full),
    83   [12] = ICE_PHY_TYPE(10GB, 10000baseT_Full),
    84   [13] = ICE_PHY_TYPE(10GB, 10000baseCR_Full),
    85   [14] = ICE_PHY_TYPE(10GB, 10000baseSR_Full),
    86   [15] = ICE_PHY_TYPE(10GB, 10000baseLR_Full),
    87   [16] = ICE_PHY_TYPE(10GB, 10000baseKR_Full),
    88   [17] = ICE_PHY_TYPE(10GB, 10000baseCR_Full),
    89   [18] = ICE_PHY_TYPE(10GB, 10000baseKR_Full),
    [all …]
|
/linux-6.14.4/drivers/input/joystick/ |
D | sidewinder.c |
    54   #define SW_ID_GP 1
    76   { 10, 10, 9, 10, 1, 1 },
    77   { 1, 1 },
    78   { 10, 10, 6, 7, 1, 1 },
    79   { 10, 10, 6, 7, 1, 1 },
    80   { 10, 10, 6, 1, 1 },
    81   { 10, 7, 7, 1, 1 }};
    94   } sw_hat_to_axis[] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-…
    165  pending = 1; /* Mark schedule */ in sw_read_packet()
    191  * sw_get_bits() and GB() compose bits from the triplet buffer into a __u64.
    [all …]
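Line 191's comment names the decoder's central trick: the SideWinder packet arrives as a buffer of narrow chunks (3-bit triplets in the common mode), and sw_get_bits()/GB() gather an arbitrary run of protocol bits out of that buffer into a __u64. A hedged, self-contained approximation of that gather loop (function name and the 3-bit chunk width are illustrative, not copied from the driver):

    #include <stdio.h>

    typedef unsigned long long u64;

    /*
     * Collect 'num' consecutive protocol bits starting at bit position 'pos'
     * from a buffer whose bytes each carry one 'width'-bit chunk
     * (width == 3 for the SideWinder triplet format).
     */
    static u64 get_bits(const unsigned char *buf, int pos, int num, int width)
    {
            u64 data = 0;
            int chunk = pos / width;        /* buffer byte holding the first bit   */
            int bit = pos % width;          /* offset of that bit inside the chunk */
            int shift = 0;

            while (num--) {
                    data |= (u64)((buf[chunk] >> bit++) & 1) << shift++;
                    if (bit == width) {
                            chunk++;
                            bit = 0;
                    }
            }
            return data;
    }

    int main(void)
    {
            /* Two 3-bit chunks, 0b101 then 0b011: bit stream 1,0,1,1,1,0. */
            unsigned char buf[] = { 0x05, 0x03 };

            printf("bits 1..4 = 0x%llx\n", get_bits(buf, 1, 4, 3));  /* 0b1110 = 0xe */
            return 0;
    }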
|
D | gf2k.c |
    33   #define GF2K_ID_G09 1
    43   …char gf2k_hat_to_axis[][2] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, …
    123  while ((gameport_read(gameport) & 1) && t) t--; in gf2k_trigger_seq()
    139  #define GB(p,n,s) gf2k_get_bits(data, p, n, s) macro
    149  data &= (1 << num) - 1; in gf2k_get_bits()
    161  input_report_abs(dev, gf2k_abs[i], GB(i<<3,8,0) | GB(i+46,1,8) | GB(i+50,1,9)); in gf2k_read()
    164  input_report_abs(dev, gf2k_abs[i], GB(i*9+60,8,0) | GB(i+54,1,9)); in gf2k_read()
    166  t = GB(40,4,0); in gf2k_read()
    171  t = GB(44,2,0) | GB(32,8,2) | GB(78,2,10); in gf2k_read()
    174  input_report_key(dev, gf2k_btn_joy[i], (t >> i) & 1); in gf2k_read()
    [all …]
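The GB(p,n,s) wrapper on line 139 extracts n bits at bit offset p from the received packet and pre-shifts them left by s, so scattered fields can simply be OR-ed together, which is exactly what the gf2k_read() lines above do to rebuild 10-bit axis values. A rough stand-alone sketch of that extract/mask/shift idea (simplified relative to the driver's gf2k_get_bits(), packet contents invented for the demo):

    #include <stdio.h>

    typedef unsigned long long u64;

    /*
     * Pull 'num' bits at bit offset 'pos' from a little-endian bit stream in
     * 'buf', then shift the field left by 'shift' so the caller can OR several
     * extractions into a single value.
     */
    static unsigned int get_bits(const unsigned char *buf, int pos, int num, int shift)
    {
            u64 data = 0;
            int i;

            for (i = 0; i < num / 8 + 2; i++)               /* gather enough bytes       */
                    data |= (u64)buf[pos / 8 + i] << (i << 3);

            data >>= pos & 7;                               /* drop bits below the field */
            data &= (1ULL << num) - 1;                      /* keep exactly 'num' bits   */
            return (unsigned int)(data << shift);
    }

    #define GB(p, n, s) get_bits(packet, (p), (n), (s))

    int main(void)
    {
            unsigned char packet[16] = { 0xab, 0xcd, 0x12, 0x34 };

            /* An 8-bit low part plus two high bits, mirroring GB(...,8,0) | GB(...,1,8) | GB(...,1,9). */
            unsigned int axis = GB(0, 8, 0) | GB(16, 1, 8) | GB(17, 1, 9);

            printf("axis = 0x%x\n", axis);
            return 0;
    }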
|
/linux-6.14.4/drivers/scsi/qla2xxx/ |
D | qla_devtbl.h |
    8    "QLA2340", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x100 */
    9    "QLA2342", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x101 */
    10   "QLA2344", "133MHz PCI-X to 2Gb FC, Quad Channel", /* 0x102 */
    11   "QCP2342", "cPCI to 2Gb FC, Dual Channel", /* 0x103 */
    12   "QSB2340", "SBUS to 2Gb FC, Single Channel", /* 0x104 */
    13   "QSB2342", "SBUS to 2Gb FC, Dual Channel", /* 0x105 */
    14   "QLA2310", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x106 */
    15   "QLA2332", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x107 */
    16   "QCP2332", "Sun cPCI to 2Gb FC, Dual Channel", /* 0x108 */
    17   "QCP2340", "cPCI to 2Gb FC, Single Channel", /* 0x109 */
    [all …]
|
/linux-6.14.4/drivers/mtd/nand/spi/ |
D | toshiba.c |
    18   SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
    19   SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
    20   SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
    21   SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
    32   * Backward compatibility for 1st generation Serial NAND devices
    110  /* 3.3V 1Gb (1st generation) */
    113  NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
    121  /* 3.3V 2Gb (1st generation) */
    124  NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
    132  /* 3.3V 4Gb (1st generation) */
    [all …]
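The NAND_MEMORG() lines describe the chip layout; assuming the usual argument order (bits per cell, page size, OOB size, pages per eraseblock, eraseblocks per LUN, max bad blocks per LUN, planes, LUNs, targets — an interpretation, not taken from this file), the raw capacity behind the "1Gb" comment on line 110 checks out as follows:

    #include <stdio.h>

    int main(void)
    {
            /* Figures from NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1) above,
             * read per the assumed field order described in the lead-in. */
            unsigned long long page_bytes = 2048;          /* main area, OOB excluded */
            unsigned long long pages_per_block = 64;
            unsigned long long blocks_per_lun = 1024;
            unsigned long long luns = 1, targets = 1;

            unsigned long long bytes = page_bytes * pages_per_block *
                                       blocks_per_lun * luns * targets;

            /* Prints: 134217728 bytes = 128 MiB = 1 Gbit */
            printf("%llu bytes = %llu MiB = %llu Gbit\n",
                   bytes, bytes >> 20, (bytes * 8) >> 30);
            return 0;
    }

The 2Gb part on line 124 simply doubles the eraseblocks-per-LUN figure (2048), doubling the same arithmetic.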
|
D | micron.c |
    17   #define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
    33   SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
    34   SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
    35   SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
    36   SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
    37   SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
    49   SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
    50   SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
    51   SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
    52   SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
    [all …]
|
/linux-6.14.4/drivers/md/dm-vdo/indexer/ |
D | geometry.c |
    32   * For a small index with a memory footprint less than 1GB, there are three possible memory
    33   * configurations: 0.25GB, 0.5GB and 0.75GB. The default geometry for each is 1024 index records
    36   * the VDO default of a 0.25 GB index, this yields a deduplication window of 256 GB using about 2.5
    37   * GB for the persistent storage and 256 MB of RAM.
    39   * For a larger index with a memory footprint that is a multiple of 1 GB, the geometry is 1024
    41   * chapters for every GB of memory footprint. For a 1 GB volume, this yields a deduplication window
    42   * of 1 TB using about 9GB of persistent storage and 1 GB of RAM.
    64   result = vdo_allocate(1, struct index_geometry, "geometry", &geometry); in uds_make_index_geometry()
    80   geometry->chapter_mean_delta = 1 << DEFAULT_CHAPTER_MEAN_DELTA_BITS; in uds_make_index_geometry()
    81   geometry->chapter_payload_bits = bits_per(record_pages_per_chapter - 1); in uds_make_index_geometry()
    [all …]
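A quick check of the figures in that comment, with VDO's 4 KB data-block size as the only added assumption (each index record tracks one 4 KB block):

    256 GB window / 4 KB per record = 2^26 = 64 Mi records for the 0.25 GB configuration
    1 GB of RAM  = 4 x 0.25 GB      -> 4 x 256 GB = 1 TB deduplication window

which matches the 256 GB and 1 TB windows quoted above.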
|
/linux-6.14.4/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ |
D | tlb.json |
    62   … "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 1GB page."
    67   …"BriefDescription": "This event counts operations that cause a TLB access to the L1I in 16GB page."
    97   … "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 1GB page."
    102  …"BriefDescription": "This event counts operations that cause a TLB access to the L1D in 16GB page."
    132  … "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 1GB page."
    137  …"BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 16GB page."
    167  … "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 1GB page."
    172  …"BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 16GB page."
    202  … "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 1GB page."
    207  …"BriefDescription": "This event counts operations that cause a TLB access to the L2I in 16GB page."
    [all …]
|
/linux-6.14.4/Documentation/admin-guide/cgroup-v1/ |
D | hugetlb.rst |
    34   For a system supporting three hugepage sizes (64k, 32M and 1G), the control
    37   hugetlb.1GB.limit_in_bytes
    38   hugetlb.1GB.max_usage_in_bytes
    39   hugetlb.1GB.numa_stat
    40   hugetlb.1GB.usage_in_bytes
    41   hugetlb.1GB.failcnt
    42   hugetlb.1GB.rsvd.limit_in_bytes
    43   hugetlb.1GB.rsvd.max_usage_in_bytes
    44   hugetlb.1GB.rsvd.usage_in_bytes
    45   hugetlb.1GB.rsvd.failcnt
    [all …]
|
/linux-6.14.4/Documentation/arch/riscv/ |
D | vm-layout.rst |
    39   …0000000000000000 | 0 | 0000003fffffffff | 256 GB | user-space virtual memory, different …
    42   …0000004000000000 | +256 GB | ffffffbfffffffff | ~16M TB | ... huge, almost 64 bits wide hole of…
    43   … | | | | virtual memory addresses up to the -256 GB
    50   ffffffc4fea00000 | -236 GB | ffffffc4feffffff | 6 MB | fixmap
    51   ffffffc4ff000000 | -236 GB | ffffffc4ffffffff | 16 MB | PCI io
    52   ffffffc500000000 | -236 GB | ffffffc5ffffffff | 4 GB | vmemmap
    53   ffffffc600000000 | -232 GB | ffffffd5ffffffff | 64 GB | vmalloc/ioremap space
    54   … ffffffd600000000 | -168 GB | fffffff5ffffffff | 128 GB | direct mapping of all physical memory
    56   fffffff700000000 | -36 GB | fffffffeffffffff | 32 GB | kasan
    62   ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
    [all …]
|
/linux-6.14.4/Documentation/arch/x86/x86_64/ |
D | mm.rst |
    20   from TB to GB and then MB/KB.
    60   ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
    61   ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
    62   ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
    73   ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
    74   ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
    75   ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
    76   ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
    77   …ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physic…
    143  ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
    [all …]
|
/linux-6.14.4/drivers/staging/greybus/Documentation/firmware/ |
D | firmware-management |
    21   ; Firmware Management Bundle (Bundle 1):
    22   [bundle-descriptor 1]
    25   ; (Mandatory) Firmware Management Protocol on CPort 1
    27   bundle = 1
    31   [cport-descriptor 1]
    32   bundle = 1
    37   bundle = 1
    42   bundle = 1
    51   and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
    56   There can be multiple devices present in /dev/ directory with name gb-fw-mgmt-N
    [all …]
|
/linux-6.14.4/fs/hfsplus/ |
D | btree.c |
    30   /* 1GB */ 4, 4, 4,
    31   /* 2GB */ 6, 6, 4,
    32   /* 4GB */ 8, 8, 4,
    33   /* 8GB */ 11, 11, 5,
    35   * For volumes 16GB and larger, we want to make sure that a full OS
    57   * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
    58   * the previous term. For Attributes (16GB to 512GB), each term is
    59   * 4**(1/5) times the previous term. For 1TB to 16TB, each term is
    60   * 2**(1/5) times the previous term.
    62   /* 16GB */ 64, 32, 5,
    [all …]
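The fractional exponents in that comment are chosen so the clump size lands on an exact power after the five volume-size doublings between 16 GB and 512 GB:

    8**(1/5) ~= 1.516,  and 1.516^5 = 8  -> the Catalog clump grows 8x from 16 GB to 512 GB
    4**(1/5) ~= 1.320,  and 1.320^5 = 4  -> the Attributes clump grows 4x over the same range

so each doubling of volume size scales the clump by a constant factor while the endpoints stay round numbers.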
|
/linux-6.14.4/include/linux/ |
D | xxhash.h |
    51   * xxHash         5.4 GB/s   10
    52   * CrapWow        3.2 GB/s    2  Andrew
    53   * MumurHash 3a   2.7 GB/s   10  Austin Appleby
    54   * SpookyHash     2.0 GB/s   10  Bob Jenkins
    55   * SBox           1.4 GB/s    9  Bret Mulvey
    56   * Lookup3        1.2 GB/s    9  Bob Jenkins
    57   * SuperFastHash  1.2 GB/s    1  Paul Hsieh
    58   * CityHash64     1.05 GB/s  10  Pike & Alakuijala
    59   * FNV            0.55 GB/s   5  Fowler, Noll, Vo
    60   * CRC32          0.43 GB/s   9
    [all …]
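For context on how this header is used: besides the benchmark table, include/linux/xxhash.h exports one-shot hashing helpers. A minimal in-kernel usage sketch, assuming the xxh64() prototype declared there (buffer, length in bytes, seed):

    #include <linux/xxhash.h>

    /* One-shot 64-bit hash of 'len' bytes with a fixed seed of 0. */
    static u64 example_checksum(const void *buf, size_t len)
    {
            return xxh64(buf, len, 0);
    }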
|
/linux-6.14.4/arch/powerpc/include/asm/book3s/64/ |
D | radix-4k.h |
    9    #define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
    10   #define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
    11   #define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
|
D | radix-64k.h |
    9    #define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
    10   #define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
    11   #define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
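Both radix headers carry the same coverage arithmetic in their comments; a small stand-alone check of those numbers (entry counts from the defines above, 2 MB of address space per PMD entry as the comments state):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long pmd_entries = 1ULL << 9;     /* RADIX_PMD_INDEX_SIZE = 9  */
            unsigned long long pud_entries = 1ULL << 9;     /* RADIX_PUD_INDEX_SIZE = 9  */
            unsigned long long pgd_entries = 1ULL << 13;    /* RADIX_PGD_INDEX_SIZE = 13 */

            unsigned long long pmd_covers = pmd_entries * (2ULL << 20);    /* 2 MB per PMD entry   */
            unsigned long long pud_covers = pud_entries * pmd_covers;      /* 1 GB per PUD entry   */
            unsigned long long pgd_covers = pgd_entries * pud_covers;      /* 512 GB per PGD entry */

            printf("PMD level maps %llu GB\n", pmd_covers >> 30);          /* 1    */
            printf("PUD level maps %llu GB\n", pud_covers >> 30);          /* 512  */
            printf("PGD level maps %llu TB (4 PB)\n", pgd_covers >> 40);   /* 4096 */
            return 0;
    }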
|
/linux-6.14.4/tools/testing/selftests/kvm/ |
D | mmu_stress_test.c |
    107  TEST_ASSERT_EQ(uc.args[1], stage); in assert_sync_stage()
    139  /* Stage 1, re-write all of guest memory. */ in vcpu_worker()
    140  run_vcpu(vcpu, 1); in vcpu_worker()
    154  TEST_ASSERT(r == -1 && errno == EFAULT, in vcpu_worker()
    220  ~((uint64_t)vm->page_size - 1); in spawn_workers()
    239  for (i = 0; abs(rendezvoused) != 1; i++) { in rendezvous_with_vcpus()
    243  abs(rendezvoused) - 1); in rendezvous_with_vcpus()
    252  atomic_set(&rendezvous, -nr_vcpus - 1); in rendezvous_with_vcpus()
    254  atomic_set(&rendezvous, nr_vcpus + 1); in rendezvous_with_vcpus()
    273  * Skip the first 4gb and slot0. slot0 maps <1gb and is used to back in main()
    [all …]
|
/linux-6.14.4/drivers/iio/temperature/ |
D | mlx90635.c |
    49   #define MLX90635_EE_Gb 0x0040 /* Gb calibration register 16bit */
    56   #define MLX90635_STAT_END_CONV BIT(1) /* End of conversion indicator */
    80   #define MLX90635_PWR_STATUS_SLEEP_STEP 1
    119  * @emissivity: Object emissivity from 0 to 1000 where 1000 = 1
    173  .reg_stride = 1,
    210  .reg_stride = 1,
    289  static int mlx90635_read_ee_ambient(struct regmap *regmap, s16 *PG, s16 *PO, s16 *Gb) in mlx90635_read_ee_ambient() argument
    307  *Gb = (u16)read_tmp; in mlx90635_read_ee_ambient()
    313  s16 *Ga, s16 *Gb, s16 *Ha, s16 *Hb, u16 *Fa_scale) in mlx90635_read_ee_object() argument
    348  *Gb = (s16)read_tmp; in mlx90635_read_ee_object()
    [all …]
|
/linux-6.14.4/Documentation/devicetree/bindings/net/ |
D | keystone-netcp.txt |
    13   includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates
    25   NetCP subsystem(10G or 1G)
    40   |-> Ethernet Port 1
    65   1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
    67   - label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
    69   "ti,netcp-gbe" for 1GbE on NetCP 1.4
    70   "ti,netcp-gbe-5" for 1GbE N NetCP 1.5 (N=5)
    71   "ti,netcp-gbe-9" for 1GbE N NetCP 1.5 (N=9)
    72   "ti,netcp-gbe-2" for 1GbE N NetCP 1.5 (N=2)
    84   index #1 - sgmii port3/4 module registers
    [all …]
|
/linux-6.14.4/Documentation/admin-guide/device-mapper/ |
D | vdo.rst |
    40   Each vdo volume reserves 3GB of space for metadata, or more depending on
    97   is 1.
    117  completion is slow. The default is 1.
    132  as hashing and compression. The default is 1.
    148  enough to have at least 1 slab per physical thread. The
    162  individual discard requests. The default and minimum is 1;
    193  Start a previously-formatted vdo volume with 1 GB logical space and 1 GB
    194  physical space, storing to /dev/dm-1 which has more than 1 GB of space.
    199  "0 2097152 vdo V4 /dev/dm-1 262144 4096 32768 16380"
    201  Grow the logical size to 4 GB.
    [all …]
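The dmsetup table line on line 199 sizes everything in 512-byte sectors (the usual device-mapper unit, assumed here), so the two sizes in the surrounding example translate as:

    2097152 sectors x 512 bytes  = 1 GiB            (the starting logical size)
    4 GiB / 512 bytes per sector = 8388608 sectors  (the length after growing to 4 GB)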
|
/linux-6.14.4/Documentation/driver-api/ |
D | edac.rst |
    77   A Single-ranked stick has 1 chip-select row of memory. Motherboards
    201  HBM2e (2GB) channel (equivalent to 8 X 2GB ranks). This creates a total
    204  While the UMC is interfacing a 16GB (8high X 2GB DRAM) HBM stack, each UMC
    205  channel is interfacing 2GB of DRAM (represented as rank).
    213  For example: a heterogeneous system with 1 AMD CPU is connected to
    221  - CPU UMCs use 1 channel, In this case UMC = EDAC channel. This follows the
    224  - GPU UMCs use 1 chip select, So UMC = EDAC CSROW.
    237  mc2 |- GPU card[0] => node 0(mc1), node 1(mc2)
    239  mc4 |- GPU card[1] => node 0(mc3), node 1(mc4)
    241  mc6 |- GPU card[2] => node 0(mc5), node 1(mc6)
    [all …]
|
/linux-6.14.4/tools/perf/Documentation/ |
D | perf-iostat.txt |
    1    perf-iostat(1)
    40   1. List all PCIe root ports (example for 2-S platform):
    54   $ perf iostat -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
    57   375083606016 bytes (375 GB, 349 GiB) copied, 215.974 s, 1.7 GB/s
    62   0000:00 1 0 2 3
    73   $ perf iostat 0000:17,0:3a -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
    76   375083606016 bytes (375 GB, 349 GiB) copied, 197.08 s, 1.9 GB/s
    88   linkperf:perf-stat[1]
|
/linux-6.14.4/arch/csky/include/asm/ |
D | page.h |
    13   #define THREAD_MASK (~(THREAD_SIZE - 1))
    14   #define THREAD_SHIFT (PAGE_SHIFT + 1)
    18   * For C-SKY "User-space:Kernel-space" is "2GB:2GB" fixed by hardware and there
    20   * address region. We use them mapping kernel 1GB direct-map address area and
    21   * for more than 1GB of memory we use highmem.
    27   #define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
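PHYS_OFFSET_OFFSET on line 27 keeps only the part of CONFIG_DRAM_BASE that falls inside one SSEG window. With purely illustrative values (SSEG_SIZE = 0x20000000 and CONFIG_DRAM_BASE = 0x48000000, assumptions for the worked example rather than values from a real config):

    0x48000000 & (0x20000000 - 1) = 0x48000000 & 0x1fffffff = 0x08000000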
|