/aosp_15_r20/out/soong/.intermediates/frameworks/base/data/fonts/fonts.xml/android_arm64_armv8-2a_cortex-a55/
  fonts.xml:
    28: Order of appearance is also the tiebreaker for weight matching. This is
    30: prefer the former when an 800 weight is requested. Since bold spans
    31: effectively add 300 to the weight, this ensures that 900 is the bold
    32: paired with the 500 weight, ensuring adequate contrast.
    39: <font weight="100" style="normal">Roboto-Regular.ttf
    44: <font weight="200" style="normal">Roboto-Regular.ttf
    49: <font weight="300" style="normal">Roboto-Regular.ttf
    54: <font weight="400" style="normal">Roboto-Regular.ttf
    59: <font weight="500" style="normal">Roboto-Regular.ttf
    64: <font weight="600" style="normal">Roboto-Regular.ttf
    [all …]
/aosp_15_r20/out/target/product/shiba/system/etc/
  fonts.xml:
    28: Order of appearance is also the tiebreaker for weight matching. This is
    30: prefer the former when an 800 weight is requested. Since bold spans
    31: effectively add 300 to the weight, this ensures that 900 is the bold
    32: paired with the 500 weight, ensuring adequate contrast.
    39: <font weight="100" style="normal">Roboto-Regular.ttf
    44: <font weight="200" style="normal">Roboto-Regular.ttf
    49: <font weight="300" style="normal">Roboto-Regular.ttf
    54: <font weight="400" style="normal">Roboto-Regular.ttf
    59: <font weight="500" style="normal">Roboto-Regular.ttf
    64: <font weight="600" style="normal">Roboto-Regular.ttf
    [all …]
  font_fallback.xml:
    10: <font weight="400" style="normal">CarroisGothicSC-Regular.ttf</font>
    13: <font weight="400" style="normal" postScriptName="ComingSoon-Regular">ComingSoon.ttf</font>
    16: <font weight="400" style="normal" postScriptName="CutiveMono-Regular">CutiveMono.ttf</font>
    22: <font weight="400" style="normal">DroidSansMono.ttf</font>
    37: <font weight="400" style="normal">SourceSansPro-Regular.ttf</font>
    38: <font weight="400" style="italic">SourceSansPro-Italic.ttf</font>
    39: <font weight="600" style="normal">SourceSansPro-SemiBold.ttf</font>
    40: <font weight="600" style="italic">SourceSansPro-SemiBoldItalic.ttf</font>
    41: <font weight="700" style="normal">SourceSansPro-Bold.ttf</font>
    42: <font weight="700" style="italic">SourceSansPro-BoldItalic.ttf</font>
    [all …]
/aosp_15_r20/out/target/product/shiba/obj/ETC/fonts.xml_intermediates/
  fonts.xml:
    28: Order of appearance is also the tiebreaker for weight matching. This is
    30: prefer the former when an 800 weight is requested. Since bold spans
    31: effectively add 300 to the weight, this ensures that 900 is the bold
    32: paired with the 500 weight, ensuring adequate contrast.
    39: <font weight="100" style="normal">Roboto-Regular.ttf
    44: <font weight="200" style="normal">Roboto-Regular.ttf
    49: <font weight="300" style="normal">Roboto-Regular.ttf
    54: <font weight="400" style="normal">Roboto-Regular.ttf
    59: <font weight="500" style="normal">Roboto-Regular.ttf
    64: <font weight="600" style="normal">Roboto-Regular.ttf
    [all …]
/aosp_15_r20/frameworks/base/data/fonts/
  fonts.xml:
    28: Order of appearance is also the tiebreaker for weight matching. This is
    30: prefer the former when an 800 weight is requested. Since bold spans
    31: effectively add 300 to the weight, this ensures that 900 is the bold
    32: paired with the 500 weight, ensuring adequate contrast.
    39: <font weight="100" style="normal">Roboto-Regular.ttf
    44: <font weight="200" style="normal">Roboto-Regular.ttf
    49: <font weight="300" style="normal">Roboto-Regular.ttf
    54: <font weight="400" style="normal">Roboto-Regular.ttf
    59: <font weight="500" style="normal">Roboto-Regular.ttf
    64: <font weight="600" style="normal">Roboto-Regular.ttf
    [all …]
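The fonts.xml comment matched above describes the weight-selection rule: pick the declared weight closest to the request, break ties by order of appearance, and treat a bold span as adding roughly 300 to the requested weight, which is how a bold 500-weight run lands on the 900 face. Below is a minimal Python sketch of that rule; the family list and helper are hypothetical illustrations of the described behavior, not the actual fonts.xml contents or the platform's matcher.

    # Hypothetical family list, (declared weight, file), in order of appearance.
    FAMILY = [
        (400, "Roboto-Regular.ttf"),
        (500, "Roboto-Medium.ttf"),
        (900, "Roboto-Black.ttf"),   # declared before 700 so it wins the 800 tie
        (700, "Roboto-Bold.ttf"),
    ]

    def match_weight(requested, bold=False, family=FAMILY):
        if bold:
            requested = min(requested + 300, 1000)  # bold spans add ~300
        # min() keeps the earliest entry on a distance tie, which models the
        # order-of-appearance tiebreak described in the comment.
        return min(family, key=lambda entry: abs(entry[0] - requested))[1]

    print(match_weight(800))             # 700 and 900 tie; the earlier-declared 900 wins
    print(match_weight(500, bold=True))  # 500 + 300 = 800 -> the 900 face pairs with 500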
/aosp_15_r20/external/robolectric/nativeruntime/src/main/resources/fonts/
  fonts.xml:
    16: Order of appearance is also the tiebreaker for weight matching. This is
    18: prefer the former when an 800 weight is requested. Since bold spans
    19: effectively add 300 to the weight, this ensures that 900 is the bold
    20: paired with the 500 weight, ensuring adequate contrast.
    27: <font weight="100" style="normal">Roboto-Regular.ttf
    32: <font weight="200" style="normal">Roboto-Regular.ttf
    37: <font weight="300" style="normal">Roboto-Regular.ttf
    42: <font weight="400" style="normal">Roboto-Regular.ttf
    47: <font weight="500" style="normal">Roboto-Regular.ttf
    52: <font weight="600" style="normal">Roboto-Regular.ttf
    [all …]
/aosp_15_r20/external/chromium-trace/catapult/systrace/systrace/test_data/
  profile-chrome_systrace_perf_chrome_data:
    1: …weight": 297850, "ts": 196493621870.241, "cpu": 2, "comm": "ksoftirqd/2", "tid": 14106, "sf": 539}…
/aosp_15_r20/external/pytorch/test/nn/
  test_parametrization.py:
    80: initial_weight_id = id(model.weight)
    90: model, "weight", Resize()
    95: parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
    98: self.assertTrue(parametrize.is_parametrized(model, "weight"))
    100: self.assertNotIn("weight", model._parameters)
    101: self.assertTrue(model.weight.shape[0] == 1)
    102: parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    104: self.assertEqual(model.weight, initial_model.weight)
    105: self.assertEqual(id(model.weight), initial_weight_id)
    109: parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
    [all …]
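The test_parametrization.py matches above exercise torch.nn.utils.parametrize: register_parametrization swaps module.weight for a computed view (stashing the raw tensor under module.parametrizations), is_parametrized reports the state, and remove_parametrizations either restores the original or bakes the computed value in. A small sketch of that API follows; the Scale class is made up for illustration, and unlike the test's Resize it does not change the tensor's shape, so it does not need unsafe=True.

    import torch
    import torch.nn as nn
    import torch.nn.utils.parametrize as parametrize

    class Scale(nn.Module):
        def forward(self, x):        # recomputed every time module.weight is read
            return 2.0 * x

    model = nn.Linear(3, 3)
    parametrize.register_parametrization(model, "weight", Scale())

    assert parametrize.is_parametrized(model, "weight")
    assert "weight" not in model._parameters              # raw tensor moved aside
    print(model.parametrizations.weight.original.shape)   # the stored original

    # leave_parametrized=False puts the original, unscaled tensor back.
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    assert not parametrize.is_parametrized(model)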
  test_pruning.py:
    101: names = ["weight", "bias"]
    128: names = ["weight", "bias"]
    145: names = ["weight", "bias"]
    153: # weight = weight_orig * weight_mask
    168: old_grad_weight = m.weight.grad.clone()  # don't grab pointer!
    169: self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
    177: prune.identity(m, name="weight")
    201: old_grad_weight = m.weight.grad.clone()  # don't grab pointer!
    202: self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
    213: compute_mask.return_value = torch.ones_like(m.weight)
    [all …]
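test_pruning.py leans on the invariant the pruning utilities maintain, weight = weight_orig * weight_mask. A short sketch with an arbitrary module, using the standard torch.nn.utils.prune entry points seen in the matches:

    import torch
    import torch.nn as nn
    import torch.nn.utils.prune as prune

    m = nn.Linear(4, 2)

    prune.identity(m, name="weight")     # attaches an all-ones mask, prunes nothing
    assert hasattr(m, "weight_orig") and hasattr(m, "weight_mask")
    assert torch.equal(m.weight, m.weight_orig * m.weight_mask)

    prune.l1_unstructured(m, name="weight", amount=0.5)   # zero the smallest 50%
    assert int(m.weight_mask.sum()) == m.weight.numel() // 2

    prune.remove(m, "weight")   # make it permanent; weight is a plain Parameter again
    assert not hasattr(m, "weight_orig")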
/aosp_15_r20/out/soong/.intermediates/frameworks/base/data/fonts/font_fallback.xml/android_arm64_armv8-2a_cortex-a55/
  font_fallback.xml:
    10: <font weight="400" style="normal">CarroisGothicSC-Regular.ttf</font>
    13: <font weight="400" style="normal" postScriptName="ComingSoon-Regular">ComingSoon.ttf</font>
    16: <font weight="400" style="normal" postScriptName="CutiveMono-Regular">CutiveMono.ttf</font>
    22: <font weight="400" style="normal">DroidSansMono.ttf</font>
    37: <font weight="400" style="normal">SourceSansPro-Regular.ttf</font>
    38: <font weight="400" style="italic">SourceSansPro-Italic.ttf</font>
    39: <font weight="600" style="normal">SourceSansPro-SemiBold.ttf</font>
    40: <font weight="600" style="italic">SourceSansPro-SemiBoldItalic.ttf</font>
    41: <font weight="700" style="normal">SourceSansPro-Bold.ttf</font>
    42: <font weight="700" style="italic">SourceSansPro-BoldItalic.ttf</font>
    [all …]
/aosp_15_r20/out/target/product/shiba/obj/ETC/font_fallback.xml_intermediates/
  font_fallback.xml:
    10: <font weight="400" style="normal">CarroisGothicSC-Regular.ttf</font>
    13: <font weight="400" style="normal" postScriptName="ComingSoon-Regular">ComingSoon.ttf</font>
    16: <font weight="400" style="normal" postScriptName="CutiveMono-Regular">CutiveMono.ttf</font>
    22: <font weight="400" style="normal">DroidSansMono.ttf</font>
    37: <font weight="400" style="normal">SourceSansPro-Regular.ttf</font>
    38: <font weight="400" style="italic">SourceSansPro-Italic.ttf</font>
    39: <font weight="600" style="normal">SourceSansPro-SemiBold.ttf</font>
    40: <font weight="600" style="italic">SourceSansPro-SemiBoldItalic.ttf</font>
    41: <font weight="700" style="normal">SourceSansPro-Bold.ttf</font>
    42: <font weight="700" style="italic">SourceSansPro-BoldItalic.ttf</font>
    [all …]
/aosp_15_r20/out/soong/.intermediates/frameworks/base/data/fonts/generate_font_fallback/gen/
  font_fallback.xml:
    10: <font weight="400" style="normal">CarroisGothicSC-Regular.ttf</font>
    13: <font weight="400" style="normal" postScriptName="ComingSoon-Regular">ComingSoon.ttf</font>
    16: <font weight="400" style="normal" postScriptName="CutiveMono-Regular">CutiveMono.ttf</font>
    22: <font weight="400" style="normal">DroidSansMono.ttf</font>
    37: <font weight="400" style="normal">SourceSansPro-Regular.ttf</font>
    38: <font weight="400" style="italic">SourceSansPro-Italic.ttf</font>
    39: <font weight="600" style="normal">SourceSansPro-SemiBold.ttf</font>
    40: <font weight="600" style="italic">SourceSansPro-SemiBoldItalic.ttf</font>
    41: <font weight="700" style="normal">SourceSansPro-Bold.ttf</font>
    42: <font weight="700" style="italic">SourceSansPro-BoldItalic.ttf</font>
    [all …]
/aosp_15_r20/external/skia/resources/android_fonts/v22/
  fonts.xml:
    5: <font weight="100" style="normal">
    8: <font weight="100" style="italic">Roboto-ThinItalic.ttf</font>
    9: <font weight="300" style="normal">Roboto-Light.ttf</font>
    10: <font weight="300" style="italic">Roboto-LightItalic.ttf</font>
    11: <font weight="400" style="normal">Roboto-Regular.ttf</font>
    12: <font weight="400" style="italic">Roboto-Italic.ttf</font>
    13: <font weight="500" style="normal">Roboto-Medium.ttf</font>
    14: <font weight="500" style="italic">Roboto-MediumItalic.ttf</font>
    15: <font weight="700" style="normal">Roboto-Bold.ttf</font>
    16: <font weight="700" style="italic">Roboto-BoldItalic.ttf</font>
    [all …]
/aosp_15_r20/external/executorch/exir/passes/
  _quant_patterns_and_replacements.py:
    29: "embedding_byte(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, "
    34: "embedding_byte.dtype(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, "
    39: "embedding_byte.out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, "
    44: "embedding_byte.dtype_out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, "
    49: def embedding_weight_checks(weight, weight_scales, weight_zero_points):
    50: assert weight.dtype in [
    53: ], f"Expecting weights to be of dtype in [torch.int8, torch.uint8], but got {weight.dtype}"
    55: weight.dim() == 2
    56: ), f"Expecting weight tensor to have dim()==2, but found {weight.dim()}"
    65: assert weight_scales.size(0) == weight.size(
    [all …]
/aosp_15_r20/build/soong/docs/
  kotlin_with_annotation_processors.dot:
    174: lib_kotlin_sources -> lib_kapt_action [ weight=0 ];
    176: lib_kapt_action -> lib_kotlin_stubs [ weight=100 ];
    178: lib_kotlin_stubs -> lib_turbine_apt_action [ weight=100 ];
    179: lib_turbine_apt_action -> lib_apt_src_jar [ weight=100 ];
    181: lib_apt_src_jar -> lib_kotlinc_action [ weight=0 ];
    182: lib_kotlin_sources -> lib_kotlinc_action [ weight=100 ];
    184: lib_kotlinc_action -> lib_kotlin_classes, lib_kotlin_headers [ weight=100 ];
    186: lib_apt_src_jar -> lib_turbine_action [ weight=0 ];
    187: lib_kotlin_headers -> lib_turbine_action [ weight=0 ];
    188: lib_java_sources -> lib_turbine_action [ weight=100 ];
    [all …]
  kotlin.dot:
    126: lib_kotlin_sources -> lib_kotlinc_action [ weight=100 ];
    128: lib_kotlinc_action -> lib_kotlin_classes, lib_kotlin_headers [ weight=100 ];
    130: lib_kotlin_headers -> lib_turbine_action [ weight=0 ];
    131: lib_java_sources -> lib_turbine_action [ weight=100 ];
    132: lib_turbine_action -> lib_java_headers [ weight=100 ];
    134: lib_kotlin_headers -> lib_javac_action [ weight=0 ];
    135: lib_java_sources -> lib_javac_action [ weight=1000 ];
    136: lib_javac_action -> lib_java_classes [ weight=100 ];
    138: lib_kotlin_classes -> lib_combine_action [ weight = 0 ];
    139: lib_java_classes -> lib_combine_action [ weight = 100 ];
    [all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  Convolution.cpp:
    219: …cudnn_depthwise_workload_with_filter(const at::Tensor& input, T stride, const at::Tensor& weight) { in check_cudnn_depthwise_workload_with_filter()
    227: if (at::symint::size<T>(weight, 2) != at::symint::size<T>(weight, 3)) return false; in check_cudnn_depthwise_workload_with_filter()
    228: auto filter = at::symint::size<T>(weight, 3); in check_cudnn_depthwise_workload_with_filter()
    257: const Tensor& weight, in xnnpack_use_convolution2d()
    264: …return xnnpack::use_convolution2d(input, weight, bias_sizes_opt, padding, stride, dilation, groups… in xnnpack_use_convolution2d()
    269: const Tensor& weight, in xnnpack_use_convolution2d()
    370: …bool use_cpu_depthwise3x3_winograd(const at::Tensor& input, const at::Tensor& weight, const std::o… in use_cpu_depthwise3x3_winograd()
    375: (weight.ndimension() == 4 ) && in use_cpu_depthwise3x3_winograd()
    376: (at::symint::size<T>(weight, 0) % at::symint::size<T>(input, 1) == 0) && in use_cpu_depthwise3x3_winograd()
    377: (at::symint::size<T>(weight, 1) == 1) && in use_cpu_depthwise3x3_winograd()
    [all …]
/aosp_15_r20/external/pytorch/test/
  test_stateless.py:
    44: weight = torch.tensor([[1.0]], device=device)
    48: parameters = {f'{prefix}.l1.weight': weight,
    52: parameters = {'l1.weight': weight,
    58: prev_weight = to_check.l1.weight.clone()
    62: # same as the input if the weight swapping went well.
    65: # check that the weight remain unmodified
    66: cur_weight = to_check.l1.weight
    146: {'module.weight': torch.zeros(5, device='cuda')},
    156: weight = torch.tensor([[1.0]], requires_grad=True)
    159: parameters = {'l1.weight': weight,
    [all …]
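The stateless tests above call a module with substitute parameters supplied as a {fully-qualified-name: tensor} dict, then check that the module's own weights are untouched. A sketch of the same idea through the public torch.func.functional_call API; the module, shapes, and values are illustrative, not the test fixtures:

    import torch
    import torch.nn as nn
    from torch.func import functional_call

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.l1 = nn.Linear(1, 1, bias=False)

        def forward(self, x):
            return self.l1(x)

    net = Net()
    x = torch.tensor([[1.0]])
    weight = torch.tensor([[3.0]])

    out = functional_call(net, {"l1.weight": weight}, (x,))
    print(out)                      # tensor([[3.]]) -- computed with the substitute weight
    print(net.l1.weight is weight)  # False -- the module's own parameter is untouched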
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
  qlinear_prepack.cpp:
    65: at::Tensor weight, in prepack()
    68: weight.dim() == 2, in prepack()
    69: "The weight tensor for quantized::linear_prepack (fbgemm) should" in prepack()
    72: auto N = weight.size(0); in prepack()
    73: auto K = weight.size(1); in prepack()
    76: auto weight_contig = weight.contiguous(); in prepack()
    77: const auto qtype = weight.qscheme(); in prepack()
    80: weight_zero_points_int32[0] = {static_cast<int32_t>(weight.q_zero_point())}; in prepack()
    85: weight.q_per_channel_zero_points()[i].item<int32_t>(); in prepack()
    90: weight_scales_float[0] = {static_cast<float>(weight.q_scale())}; in prepack()
    [all …]
  qconv_prepack.cpp:
    30: at::Tensor weight, in prepack()
    39: weight.ndimension() == kSpatialDim + 2, in prepack()
    65: const int input_channels = transpose ? weight.size(0) in prepack()
    66: : weight.size(1) * groups; in prepack()
    68: const int output_channels = transpose ? weight.size(1) * groups in prepack()
    70: : weight.size(0); in prepack()
    71: const int kernel_d = kSpatialDim == 2 ? 1 : weight.size(2); in prepack()
    72: const int kernel_h = weight.size(kSpatialDim); in prepack()
    73: const int kernel_w = weight.size(kSpatialDim + 1); in prepack()
    95: const auto qtype = weight.qscheme(); in prepack()
    [all …]
/aosp_15_r20/external/fonttools/Tests/designspaceLib/data/
  test_v5_sourceserif.designspace:
    4: <axis tag="wght" name="weight" minimum="200" maximum="900" default="400">
    41: <dimension name="weight" xvalue="0"/>
    48: <dimension name="weight" xvalue="394"/>
    55: <dimension name="weight" xvalue="1000"/>
    62: <dimension name="weight" xvalue="0"/>
    69: <dimension name="weight" xvalue="394"/>
    76: <dimension name="weight" xvalue="1000"/>
    83: <dimension name="weight" xvalue="0"/>
    90: <dimension name="weight" xvalue="394"/>
    97: <dimension name="weight" xvalue="1000"/>
    [all …]
  test_v5_aktiv.designspace:
    4: <axis tag="wght" name="Weight" minimum="100" maximum="900" default="400">
    56: <condition name="Weight" minimum="116" maximum="185"/>
    67: <dimension name="Weight" xvalue="22"/>
    74: <dimension name="Weight" xvalue="22"/>
    81: <dimension name="Weight" xvalue="22"/>
    88: <dimension name="Weight" xvalue="22"/>
    95: <dimension name="Weight" xvalue="22"/>
    102: <dimension name="Weight" xvalue="22"/>
    109: <dimension name="Weight" xvalue="84"/>
    116: <dimension name="Weight" xvalue="84"/>
    [all …]
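Both designspace files declare a weight axis (tag "wght") and position sources along it with <dimension ... xvalue="..."/> elements. A small sketch of reading those axis bounds and source locations with fontTools.designspaceLib; the file path below is a placeholder, not one of the test files:

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument.fromfile("example.designspace")

    for axis in doc.axes:
        # e.g. for test_v5_sourceserif above: wght weight 200 400 900
        print(axis.tag, axis.name, axis.minimum, axis.default, axis.maximum)

    for source in doc.sources:
        # location maps the axis name as declared ("weight" or "Weight" above)
        # to the design-space coordinate from <dimension ... xvalue="..."/>.
        print(source.filename, source.location)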
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
  Convolution.cpp:
    81: * Rearranges a convolution weight tensor to a layout that can be used by
    89: * applied to this weight tensor:
    106: at::Tensor weight = weight_in.clone(); in rearrange_weights_dw()
    108: uint32_t N = ops::get_dim<DimConv2DKernel::OutChannels>(weight); in rearrange_weights_dw()
    109: uint32_t C = ops::get_dim<DimConv2DKernel::InChannels>(weight); in rearrange_weights_dw()
    110: uint32_t H = ops::get_dim<DimConv2DKernel::Height>(weight); in rearrange_weights_dw()
    111: uint32_t W = ops::get_dim<DimConv2DKernel::Width>(weight); in rearrange_weights_dw()
    117: weight = in rearrange_weights_dw()
    118: at::pad(weight, {0, 0, 0, 0, 0, 0, 0, N_padding_needed}, "constant", 0); in rearrange_weights_dw()
    121: weight = weight.reshape({N_aligned, C, H * W}); in rearrange_weights_dw()
    [all …]
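The rearrange_weights_dw matches above pad the output-channel dimension of a depthwise convolution weight up to an alignment boundary and then flatten the spatial dimensions. A Python/PyTorch sketch of just that pad-and-reshape step; the alignment value of 4 is an assumption for illustration, and the real kernel performs further repacking after this:

    import torch
    import torch.nn.functional as F

    def pad_and_flatten_dw_weight(weight: torch.Tensor, align: int = 4) -> torch.Tensor:
        N, C, H, W = weight.shape
        n_aligned = ((N + align - 1) // align) * align
        n_padding_needed = n_aligned - N
        # F.pad's list runs from the last dim backwards:
        # (W_lo, W_hi, H_lo, H_hi, C_lo, C_hi, N_lo, N_hi)
        weight = F.pad(weight, [0, 0, 0, 0, 0, 0, 0, n_padding_needed], value=0.0)
        return weight.reshape(n_aligned, C, H * W)

    w = torch.randn(6, 1, 3, 3)                # depthwise-style kernel stack
    print(pad_and_flatten_dw_weight(w).shape)  # torch.Size([8, 1, 9])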
/aosp_15_r20/external/pytorch/test/ao/sparsity/
  test_sparsifier.py:
    41: sparsifier.prepare(model, [{"tensor_fqn": "linear1.weight"}])
    43: assert sparsifier.groups[0]["tensor_fqn"] == "linear1.weight"
    57: {"tensor_fqn": "seq.0.weight", "test": 42},
    59: {"tensor_fqn": "linear2.weight"},
    64: assert sparsifier.groups[0]["tensor_fqn"] == "seq.0.weight"
    67: assert sparsifier.groups[1]["tensor_fqn"] == "linear2.weight"
    78: sparsifier.prepare(model, [{"tensor_fqn": "linear1.weight"}])
    80: assert torch.all(model.linear1.parametrizations.weight[0].mask[0] == 0)
    86: sparsifier0.prepare(model0, [{"tensor_fqn": "linear1.weight"}])
    87: mask = model0.linear1.parametrizations["weight"][0].mask
    [all …]
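The sparsifier tests configure which tensors to sparsify by fully qualified name ("tensor_fqn") and read the resulting mask back through the parametrization attached to the weight. A sketch of that flow using torch.ao.pruning.WeightNormSparsifier (module path and defaults as in recent PyTorch releases); the model and sparsity level are illustrative:

    import torch
    import torch.nn as nn
    from torch.ao.pruning import WeightNormSparsifier

    model = nn.Sequential(nn.Linear(8, 8))
    sparsifier = WeightNormSparsifier(sparsity_level=0.5)

    sparsifier.prepare(model, [{"tensor_fqn": "0.weight"}])  # name tensors by FQN
    sparsifier.step()                        # compute masks for the configured level

    mask = model[0].parametrizations["weight"][0].mask
    print(mask.float().mean())               # roughly 0.5 of the entries survive

    sparsifier.squash_mask()                 # bake the mask into weight and clean up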
/aosp_15_r20/external/pytorch/torch/ao/quantization/
  qconfig.py:
    85: class QConfig(namedtuple("QConfig", ["activation", "weight"])):
    101: weight=default_observer.with_args(dtype=torch.qint8))
    105: def __new__(cls, activation, weight):
    107: if isinstance(activation, nn.Module) or isinstance(weight, nn.Module):
    112: return super().__new__(cls, activation, weight)
    119: class QConfigDynamic(namedtuple("QConfigDynamic", ["activation", "weight"])):
    133: my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))
    136: def __new__(cls, activation=torch.nn.Identity, weight=torch.nn.Identity):
    138: if isinstance(weight, nn.Module):
    143: return super().__new__(cls, activation, weight)
    [all …]
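QConfig, as the matched docstring shows, pairs an observer factory for activations with one for weights, passed as classes or with_args partials rather than instances (the __new__ check above rejects nn.Module instances). A short sketch of constructing one and attaching it in the eager-mode workflow; the model is an example:

    import torch
    import torch.nn as nn
    from torch.ao.quantization import QConfig, default_observer, default_weight_observer

    my_qconfig = QConfig(
        activation=default_observer.with_args(dtype=torch.quint8),
        weight=default_weight_observer.with_args(dtype=torch.qint8),
    )

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())
    model.qconfig = my_qconfig                            # eager-mode convention
    print(my_qconfig.activation(), my_qconfig.weight())   # instantiate the observers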