#pragma once
#include <c10/util/Exception.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/jit/api/object.h>
#include <torch/csrc/jit/frontend/source_range.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/named_value.h>
#include <torch/csrc/jit/runtime/argument_spec.h>
#include <torch/csrc/jit/runtime/graph_executor.h>

#include <torch/csrc/Export.h>
#include <torch/csrc/api/include/torch/ordered_dict.h>
#include <torch/csrc/jit/api/compilation_unit.h>

#include <ATen/core/function_schema.h>
#include <ATen/core/qualified_name.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/irange.h>
#include <optional>

#include <functional>
#include <memory>
#include <mutex>
#include <ostream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
// This file contains classes that assist in desugaring Python-style
// modules and their methods into flattened graphs that don't contain any
// function calls.

namespace torch::jit {

using ::c10::Argument;
using ::c10::FunctionSchema;
using ::c10::QualifiedName;
// Maps a filename to its content.
using ExtraFilesMap = std::unordered_map<std::string, std::string>;

using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;

struct Module;

template <typename T>
struct slot_list_impl;

template <typename T>
struct Named {
  std::string name;
  T value;
};

using NameModule = Named<Module>;
using NameValue = Named<IValue>;
using NameTensor = Named<at::Tensor>;

namespace detail {
struct TORCH_API ModulePolicy;
struct TORCH_API ParameterPolicy;
struct TORCH_API AttributePolicy;
struct TORCH_API BufferPolicy;
template <typename P>
struct NamedPolicy;
} // namespace detail

using module_list = slot_list_impl<detail::ModulePolicy>;
using named_module_list =
    slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>;

using parameter_list = slot_list_impl<detail::ParameterPolicy>;
using named_parameter_list =
    slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>;

using attribute_list = slot_list_impl<detail::AttributePolicy>;
using named_attribute_list =
    slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>;

using buffer_list = slot_list_impl<detail::BufferPolicy>;
using named_buffer_list =
    slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>;

using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;

struct TORCH_API Module : public Object {
  explicit Module(c10::QualifiedName class_name);
  Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
  Module() = default;
  Module(const Module&) = default;
  Module& operator=(const Module&) = default;
  Module(Module&&) noexcept = default;
  Module& operator=(Module&&) noexcept = default;
  Module(
      c10::QualifiedName,
      std::shared_ptr<CompilationUnit> cu,
      bool shouldMangle = false);
  Module(ModulePtr module_value) : Object(std::move(module_value)) {}
  ~Module() = default;

  void set_optimized(bool o) {
    TORCH_WARN(
        "Module::set_optimized() is deprecated and has no effect. "
        "Please use setGraphExecutorOptimize()");
  }

  bool is_optimized() const {
    TORCH_WARN(
        "Module::is_optimized() is deprecated and always returns true. "
        "Please use getGraphExecutorOptimize()");
    return true;
  }

  IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) {
    return get_method("forward")(std::move(inputs), kwargs);
  }
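
  // A minimal usage sketch (illustrative, not part of this API): invoking a
  // scripted module's `forward` from C++. Assumes a module with a `forward`
  // method was loaded via torch::jit::load() (declared in
  // torch/csrc/jit/serialization/import.h); "model.pt" is a hypothetical path.
  //
  //   Module m = torch::jit::load("model.pt");
  //   std::vector<IValue> inputs;
  //   inputs.emplace_back(torch::ones({1, 3}));
  //   IValue out = m.forward(std::move(inputs));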

  // In script modules, buffers are Tensor attributes that are _not_ registered
  // as parameters. This is different from nn.Module, where there is a special
  // register_buffer method. With this simplification, we only need to track
  // whether a slot is a parameter in order to classify it.
  void register_buffer(const std::string& name, at::Tensor v) {
    bool is_param = false;
    bool is_buffer = true;
    std::lock_guard<std::mutex> lock(*register_mutex_);
    type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer);
    _ivalue()->setAttr(name, std::move(v));
  }

  void register_parameter(
      const std::string& name,
      at::Tensor v,
      bool is_buffer) {
    std::lock_guard<std::mutex> lock(*register_mutex_);
    type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer);
    _ivalue()->setAttr(name, std::move(v));
  }

  void register_attribute(
      const std::string& name,
      const TypePtr& t,
      IValue v,
      bool is_param = false,
      bool is_buffer = false) {
    type()->addOrCheckAttribute(name, t, is_param, is_buffer);
    _ivalue()->setAttr(name, std::move(v));
  }

  void register_module(const std::string& name, const Module& module) {
    type()->addOrCheckAttribute(name, module.type());
    _ivalue()->setAttr(name, module._ivalue());
  }
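
  // A minimal usage sketch (illustrative) of the registration API above,
  // assuming modules built by hand against a fresh CompilationUnit; all
  // names here are hypothetical.
  //
  //   auto cu = std::make_shared<CompilationUnit>();
  //   Module m(c10::QualifiedName("MyModule"), cu);
  //   m.register_parameter("weight", torch::randn({4, 4}), /*is_buffer=*/false);
  //   m.register_buffer("running_mean", torch::zeros({4}));
  //   Module sub(c10::QualifiedName("Sub"), cu);
  //   m.register_module("sub", sub);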

  void apply(const std::function<void(Module&)>& fn);

  buffer_list buffers(bool recurse = true) const;
  named_buffer_list named_buffers(bool recurse = true) const;

  module_list children() const; // direct modules
  named_module_list named_children() const;
  module_list modules() const; // all modules, including this one, recursively
  named_module_list named_modules() const;

  // all tensors involved in gradient optimization
  parameter_list parameters(bool recurse = true) const;
  named_parameter_list named_parameters(bool recurse = true) const;

  // all members of the object, similar to iterating over dir(obj) in python
  attribute_list attributes(bool recurse = true) const;
  named_attribute_list named_attributes(bool recurse = true) const;
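
  // A minimal usage sketch (illustrative): the lists above are lazy views
  // over the module's slots and can be iterated directly.
  //
  //   for (const NameTensor& p : m.named_parameters(/*recurse=*/true)) {
  //     std::cout << p.name << " " << p.value.sizes() << "\n";
  //   }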

  void dump(
      bool print_method_bodies,
      bool print_attr_values,
      bool print_param_values) const;

  std::string dump_to_str(
      bool print_method_bodies,
      bool print_attr_values,
      bool print_param_values) const;

  /// Enables "training" mode.
  void train(bool on = true);
  /// Calls train(false) to enable "eval" mode.
  /// Do not override this method, override `train()` instead.
  void eval() {
    train(/*on=*/false);
  }
  /// True if the module is in training mode.
  bool is_training() const {
    return attr("training", true).toBool();
  }
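
  // A minimal usage sketch (illustrative): switching modes before running.
  //
  //   m.eval();                      // equivalent to m.train(false)
  //   TORCH_CHECK(!m.is_training()); // reads the "training" attribute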

  /// Recursively casts all parameters to the given `dtype` and `device`.
  ///
  /// If `non_blocking` is true and the source is in pinned memory and
  /// destination is on the GPU or vice versa, the copy is performed
  /// asynchronously with respect to the host. Otherwise, the argument has no
  /// effect.
  void to(at::Device device, at::ScalarType dtype, bool non_blocking = false);

  /// Recursively casts all parameters to the given dtype.
  ///
  /// If `non_blocking` is true and the source is in pinned memory and
  /// destination is on the GPU or vice versa, the copy is performed
  /// asynchronously with respect to the host. Otherwise, the argument has no
  /// effect.
  void to(at::ScalarType dtype, bool non_blocking = false);

  /// Recursively moves all parameters to the given device.
  ///
  /// If `non_blocking` is true and the source is in pinned memory and
  /// destination is on the GPU or vice versa, the copy is performed
  /// asynchronously with respect to the host. Otherwise, the argument has no
  /// effect.
  void to(at::Device device, bool non_blocking = false);
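
  // A minimal usage sketch (illustrative) of the overloads above:
  //
  //   m.to(at::kHalf);                 // dtype only
  //   m.to(at::Device(at::kCUDA, 0));  // device only
  //   m.to(at::Device(at::kCUDA, 0), at::kHalf, /*non_blocking=*/true);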

  void save(
      std::ostream& out,
      const ExtraFilesMap& extra_files = ExtraFilesMap()) const;

  void save(
      const std::string& filename,
      const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
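
  // A minimal usage sketch (illustrative): serializing alongside extra
  // files; the file name and contents here are hypothetical.
  //
  //   ExtraFilesMap extra{{"metadata.json", "{\"version\": 1}"}};
  //   m.save("model.pt", extra);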

  void _save_for_mobile(
      std::ostream& out,
      const ExtraFilesMap& extra_files = ExtraFilesMap(),
      bool save_mobile_debug_info = false,
      bool use_flatbuffer = false) const;

  void _save_for_mobile(
      const std::string& filename,
      const ExtraFilesMap& extra_files = ExtraFilesMap(),
      bool save_mobile_debug_info = false,
      bool use_flatbuffer = false) const;

  Module copy() const;

  Module deepcopy(std::optional<at::Device> device = std::nullopt) const;

  // Clones both the underlying `ClassType` and the module instance (data).
  // This function creates a new `ClassType` and returns a new instance that
  // has the same data as the current instance but with the new type. Shared
  // ClassTypes will be preserved as well.
  Module clone(bool inplace = false) const;

  // Clones both the underlying `ClassType` and the module instance (data).
  // This function creates a new `ClassType` and returns a new instance that
  // has the same data as the current instance but with the new type. Shared
  // ClassTypes will be preserved as well. Also allows the caller to specify a
  // set of method and attribute names to not clone.
  Module clone(
      bool inplace,
      const std::unordered_set<std::string>& ignored_method,
      const std::unordered_set<std::string>& ignored_attributes) const;
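
  // A minimal usage sketch (illustrative) of the duplication flavors above;
  // the trailing summaries are assumptions based on the doc comments, not a
  // specification.
  //
  //   Module shallow = m.copy();   // new instance, same ClassType
  //   Module deep = m.deepcopy();  // new instance, attributes deep-copied
  //   Module cloned = m.clone();   // new ClassType and new instance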

  void clone_method(const Module& orig, const std::string& name);

  IValue operator()(std::vector<IValue> inputs);

  template <typename... Types>
  IValue create_class(const c10::QualifiedName& name, Types&&... args) const {
    return create_class(name, {IValue(std::forward<Types>(args))...});
  }

  IValue create_class(const c10::QualifiedName& name, Stack stack) const;

  inline bool operator==(const Module& y) const noexcept {
    return _ivalue() == y._ivalue();
  }

  void set_delete_memory(std::shared_ptr<char> delete_mem) {
    mem_to_delete_ = std::move(delete_mem);
  }

  // A set of functions to maintain input shapes through torch.jit.save and
  // torch.jit.load. It only works on tensors and lists/dicts of tensors
  // because tracing is only supported by these types.
  void store_traced_inputs(
      const std::string& func_name,
      std::vector<IValue> inputs) {
    if (inputs.empty()) {
      return;
    }
    auto c10_inputs = c10::impl::GenericList(AnyType::get());
    for (IValue& value : inputs) {
      // Not checking whether this is a traceable type, as that is already
      // checked higher up in the stack; changing that would require a larger
      // restructuring.
      c10_inputs.emplace_back(std::move(value));
    }
    traced_inputs_.insert_or_assign(func_name, c10_inputs);
  }

  c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs()
      const {
    return traced_inputs_;
  }
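
  // A minimal usage sketch (illustrative): recording example inputs so they
  // survive a save/load round trip.
  //
  //   std::vector<IValue> example;
  //   example.emplace_back(torch::ones({1, 3}));
  //   m.store_traced_inputs("forward", std::move(example));
  //   auto traced = m.retrieve_traced_inputs(); // Dict<name, GenericList>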

 private:
  Module clone_impl(
      std::unordered_map<TypePtr, TypePtr>& type_remap,
      bool inplace,
      IValue::HashIdentityIValueMap memo,
      const std::unordered_set<std::string>& ignored_methods,
      const std::unordered_set<std::string>& ignored_attributes) const;

  void clone_method(
      const Module& orig,
      const Function& method,
      const std::unordered_map<TypePtr, TypePtr>& type_remap);

  c10::QualifiedName getNameForMethod(std::string basename) const {
    return QualifiedName(*type()->name(), std::move(basename));
  }

  void to_impl(
      const std::optional<at::Device>& device,
      const std::optional<at::ScalarType>& dtype,
      bool non_blocking);

  // Extra handle for the module to delete when itself is deleted
  std::shared_ptr<char> mem_to_delete_;

  // Map of function names to the traced inputs that they have been traced with
  c10::Dict<std::string, c10::impl::GenericList> traced_inputs_;
  // Mutex to keep registering a buffer or parameter thread safe.
  std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>();
};

// C++ equivalent API of `torch.jit.freeze`. See the documentation there for
// details.
TORCH_API Module freeze(
    const Module& module,
    const std::optional<std::vector<std::string>>& preserved_attrs =
        std::nullopt,
    bool optimize_numerics = true);

// C++ equivalent API of `torch.jit.optimize_for_inference`. See the
// documentation there for details.
TORCH_API Module optimize_for_inference(
    Module& module,
    const std::vector<std::string>& other_methods = {});
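
// A minimal usage sketch (illustrative) of a typical inference pipeline,
// assuming `m` is a scripted module; `torch.jit.freeze` expects the module
// to be in eval mode first.
//
//   m.eval();
//   Module frozen = freeze(m);
//   Module ready = optimize_for_inference(frozen);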

enum class FusionBehavior { STATIC, DYNAMIC };

using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>;
// clang-format off
/*
Sets the type and number of specializations that can occur during fusion.

Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC
and depth is an integer.

Behavior - static vs dynamic:
    In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
    based on some initial profiling runs.
    In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
    shapes are possible.

In both cases, we also recompile on new striding behavior, device, or dtype.

Behavior - fallback functions & depth:
    When an input doesn't match the format required by the specialized compiled op, it will run
    a fallback function. Fallback functions are recursively compiled and specialized based
    on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
    limit the number of specializations that can be compiled, before giving up on recompiling and
    falling back to a completely un-fused, un-specialized implementation.

The list of (type, depth) pairs controls the type of specializations and the number of
specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first
two specializations will use static fusion, the following two specializations will use
dynamic fusion, and any inputs that satisfy none of the 4 options will run an
unfused implementation.

NB: in the future, as more fusion backends are added, there may be more granular
APIs for specific fusers.
*/
// clang-format on
TORCH_API FusionStrategy getFusionStrategy();
// returns previous strategy
TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
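
// A minimal usage sketch (illustrative): allow two static and then two
// dynamic specializations before falling back to unfused execution.
//
//   FusionStrategy strategy = {
//       {FusionBehavior::STATIC, 2}, {FusionBehavior::DYNAMIC, 2}};
//   FusionStrategy previous = setFusionStrategy(strategy);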

namespace detail {

struct TORCH_API SlotCursor {
  Module module_;
  int64_t i_; // slot offset, -1 indicates the module itself
};

} // namespace detail

// This iterator allows the (optionally recursive) enumeration of
// the members of a Module. It performs a depth-first pre-order
// traversal of the module. The Policy template parameter determines
// which slots of the object should be included. For instance,
// when iterating parameters, we return the parameter tensors,
// but skip modules, buffers, and other attributes.
// See ModulePolicy for comments about the Policy object's API.
template <typename Policy>
struct slot_iterator_impl {
  using SlotCursor = detail::SlotCursor;
  using value_type = typename Policy::value_type;
  slot_iterator_impl(
      Module root,
      bool recurse, // if true, do a depth-first search, otherwise, just look at
                    // slots of root
      bool return_module) // if true include root itself as the first thing
                          // visited (used in modules())
      : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
        recurse_(recurse) {
    // advance iterator to first valid element (or the end, if empty)
    while_not_valid_next();
  }
  // empty cursors_, represents end of iteration
  slot_iterator_impl() : recurse_(false) {}
  value_type operator*() const {
    return Policy::create(cursors_, cur());
  }
  value_type operator->() const {
    return **this;
  }
  slot_iterator_impl& operator++() {
    next_valid();
    return *this;
  }
  slot_iterator_impl operator++(int) {
    // this is really expensive, should we delete it so people don't use it
    // instead of prefix?
    slot_iterator_impl old = *this;
    ++(*this);
    return old;
  }

 private:
  // return_module() is a corner case where instead of returning a submodule
  // of root, we are returning root itself, because we are iterating modules(),
  // which contains the root module itself.
  // It is represented with a single SlotCursor whose index is -1.
  bool return_module() const {
    return top().i_ == -1;
  }
  const SlotCursor& top() const {
    return cursors_.back();
  }
  SlotCursor& top() {
    return cursors_.back();
  }
  IValue cur() const {
    return return_module() ? top().module_._ivalue()
                           : top().module_._ivalue()->getSlot(top().i_);
  }

  // advance to the next slot in a depth-first pre-order traversal of the
  // module's slots. This function does not guarantee the next slot is a
  // valid element of the iteration. That is done by valid().
  // invariant: !cursors_.empty()
  void next() {
    // we just returned the module itself, advance i_ to 0 so we are now
    // at the first slot of the module.
    if (return_module()) {
      ++top().i_;
      return;
    }
    // the last traversal action advanced beyond the number of slots in the
    // module so continue the iteration in the parent.
    if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) {
      cursors_.pop_back();
      if (!cursors_.empty()) {
        ++top().i_;
      }
      return;
    }
    // if the current thing is a module, we have to scan it for recursive
    // traversals. We do this by adding a new SlotCursor to track the traversal.
    if (recurse_ &&
        top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) {
      cursors_.emplace_back(SlotCursor{cur().toModule(), 0});
      return;
    }
    // common case: advance to the next slot.
    ++top().i_;
  }
  // is the current position of the iterator a valid one?
  // otherwise, we have to continue advancing.
  bool valid() const {
    return top().i_ <
        int64_t(top().module_._ivalue()->type()->numAttributes()) &&
        Policy::valid(
               top().module_._ivalue()->type(),
               top().i_,
               top().module_._ivalue()->getSlot(top().i_));
  }
  void while_not_valid_next() {
    // advance iteration until we are either at the end (cursors_.empty())
    // or in a valid state. return_module() is a special case, and is always
    // considered valid, regardless of Policy, because it is only true when
    // we are iterating modules.
    while (!cursors_.empty() && !return_module() && !valid()) {
      next();
    }
  }
  void next_valid() {
    // avoid crashing if this is empty
    if (cursors_.empty()) {
      return;
    }
    // advance to the next element, which may not be valid
    next();
    while_not_valid_next();
  }

  std::vector<SlotCursor> cursors_;
  bool recurse_;

  friend inline bool operator!=(
      const slot_iterator_impl<Policy>& a,
      const slot_iterator_impl<Policy>& b) {
    // we are finished iterating when we have no more iteration SlotCursors.
    // end is always an empty iterator with no cursors.
    return (a.cursors_.empty() != b.cursors_.empty());
  }
};

// This type represents lists of parameters, attributes, and
// submodules contained in the module. It is abstract because
// the elements are not stored directly in std::vectors but inside the
// module's IValue object itself.
template <typename Policy>
struct slot_list_impl {
  using iterator = slot_iterator_impl<Policy>;
  using const_iterator = slot_iterator_impl<Policy>;
  using value_type = typename iterator::value_type;
  slot_iterator_impl<Policy> begin() const {
    return slot_iterator_impl<Policy>(module_, recurse_, return_module_);
  }
  slot_iterator_impl<Policy> end() const {
    return slot_iterator_impl<Policy>();
  }
  size_t size() const {
    if (!size_) {
      size_ = size_t(0);
      for ([[maybe_unused]] const value_type& _ : *(this)) {
        ++*size_;
      }
    }
    return *size_;
  }

  slot_list_impl(Module module, bool recurse, bool return_module)
      : module_(std::move(module)),
        recurse_(recurse),
        return_module_(return_module),
        size_(std::nullopt) {
    if (!recurse && !return_module && Policy::all_slots) {
      size_ = module_.num_slots();
    }
  }

 private:
  Module module_;
  bool recurse_;
  bool return_module_;
  // size of this list, cached on first request
  // when we need to filter the slot list
  mutable std::optional<size_t> size_;
  friend struct Module;
};

namespace detail {

// slot_iterator_impl always iterates over all the slots in a module;
// the Policy template argument determines which slots should be returned
// and their types
struct TORCH_API ModulePolicy {
  // the type of the value being returned
  using value_type = Module;

  // the logic for creating the type being returned, given the raw IValue
  // of that object.
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return Module(std::move(v).toObject());
  }
  // is slot i in typ something that this iterator should return? otherwise,
  // we skip it.
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return typ->getAttribute(i)->is_module();
  }
  // are we going to return everything? If so, we can optimize the calculation
  // of the size of the list.
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
};

struct TORCH_API ParameterPolicy {
  using value_type = at::Tensor;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return std::move(v).toTensor();
  }
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return typ->is_parameter(i) && v.isTensor();
  }
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
};

struct TORCH_API BufferPolicy {
  using value_type = at::Tensor;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return std::move(v).toTensor();
  }
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
        typ->is_buffer(i);
  }
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
};

struct TORCH_API AttributePolicy {
  using value_type = IValue;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    return v;
  }
  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
    return true;
  }
  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
};

// takes a Policy object and makes a version of it that returns the slot
// along with the fully qualified name of that slot. This is used for the
// named_ variants like named_parameters().
template <typename Policy>
struct NamedPolicy {
  using value_type = Named<typename Policy::value_type>;
  static value_type create(
      const std::vector<detail::SlotCursor>& cursors,
      IValue v) {
    std::string name;
    if (cursors.size() == 1) {
      name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back());
    } else {
      std::ostringstream ss;
      for (const auto i : c10::irange(cursors.size())) {
        if (i > 0) {
          ss << ".";
        }
        ss << nameFragment(cursors[i]);
      }
      name = ss.str();
    }
    return value_type{std::move(name), Policy::create(cursors, std::move(v))};
  }
  static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) {
    return Policy::valid(t, i, v);
  }
  static constexpr bool all_slots = Policy::all_slots;

 private:
  static std::string nameFragment(const detail::SlotCursor& f) {
    return f.module_.type()->getAttributeName(f.i_);
  }
};

} // namespace detail

TORCH_API bool& getInlineEverythingMode();

namespace script {
// We once had a `script::` namespace that was deleted. This is for backcompat
// of the public API; new code should not use this type alias.
using Module = ::torch::jit::Module;
using ExtraFilesMap = ::torch::jit::ExtraFilesMap;
} // namespace script

} // namespace torch::jit