xref: /aosp_15_r20/external/pytorch/aten/src/ATen/MapAllocator.cpp (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
1 #include <ATen/MapAllocator.h>
2 
3 #include <atomic>
4 #include <random>
5 #include <string>
6 #if ATOMIC_INT_LOCK_FREE == 2
7 #define AT_ATOMIC_IPC_REFCOUNT 1
8 #endif
9 
10 #include <c10/core/CPUAllocator.h>
11 
12 #ifdef _WIN32
13 #include <c10/util/Unicode.h>
14 #endif
15 
16 #if defined(HAVE_MMAP)
17 #include <fcntl.h>
18 #include <sys/mman.h>
19 #include <sys/stat.h>
20 #endif
21 
22 #if !defined(_MSC_VER) || defined(HAVE_MMAP)
23 #include <sys/types.h>
24 #include <unistd.h>
25 #elif defined(_MSC_VER)
26 #include <c10/util/win32-headers.h>
27 #endif
28 #include <fmt/format.h>
29 
30 namespace at {
31 
32 static constexpr int64_t map_alloc_alignment = 64;
33 
NewProcessWideShmHandle()34 std::string NewProcessWideShmHandle() {
35   static std::atomic<uint64_t> counter{0};
36   static std::random_device rd;
37 #ifdef _MSC_VER
38   return fmt::format(
39       "/torch_{}_{}_{}",
40       GetCurrentProcessId(),
41       rd(),
42       counter.fetch_add(1, std::memory_order_relaxed));
43 #else
44   return fmt::format(
45       "/torch_{}_{}_{}",
46       getpid(),
47       rd(),
48       counter.fetch_add(1, std::memory_order_relaxed));
49 #endif
50 }
51 #if defined(_WIN32) || defined(HAVE_MMAP)
52 
namespace {
// Header stored at the very start of every refcounted mapping: a reference
// count shared by all processes attached to the segment (relies on
// std::atomic<int> being lock-free, see AT_ATOMIC_IPC_REFCOUNT above).
struct MapInfo {
  std::atomic<int> refcount;
};

// Placeholders recorded when the caller passes an empty filename.
constexpr const char* unknown_filename = "filename not specified";
#ifdef _WIN32
constexpr const char* unknown_eventname = "eventname not specified";
#endif
}  // namespace (anonymous)
63 
// Workhorse constructor: maps `filename` (or, with ALLOCATOR_MAPPED_FROMFD,
// the already-open descriptor `fd`) into this process's address space,
// honoring the ALLOCATOR_MAPPED_* flag bits.  On Windows the mapping is
// backed either by a named file-mapping object (SHAREDMEM) or a regular
// file; on POSIX it is backed by open()/shm_open() followed by mmap().
// `size == 0` maps the whole existing file; a zero request maps nothing.
MapAllocator::MapAllocator(WithFd, c10::string_view filename, int fd, int flags, size_t size)
  : filename_(filename.empty() ? unknown_filename : filename)
  , size_(0) // to be filled later
#ifdef _WIN32
  , handle_(INVALID_HANDLE_VALUE) // to be filled later
  , event_(INVALID_HANDLE_VALUE) // to be filled later
  , eventname_(filename.empty() ? unknown_eventname : (std::string(filename) + "_event"))
#else
  , fd_(fd)
#endif
{

  // NOCREATE is only meaningful for shared mappings; silently drop it
  // for private ones.
  if (!(flags & ALLOCATOR_MAPPED_SHARED) && !(flags & ALLOCATOR_MAPPED_SHAREDMEM)) {
    flags &= ~ALLOCATOR_MAPPED_NOCREATE;
  }
  // flags consisting of EXCLUSIVE alone (XOR leaves nothing) is rejected:
  // exclusivity only makes sense for shared mappings.
  if ((flags ^ ALLOCATOR_MAPPED_EXCLUSIVE) == 0) {
    TORCH_CHECK(false, "ALLOCATOR_MAPPED_EXCLUSIVE flag requires opening the file in shared mode");
  }
#ifdef _WIN32
  if (fd != -1) {
    TORCH_CHECK(false, "MapAllocator_newWithFd is unsupported on Windows");
  }
#endif
  flags_ = flags;

  // OK, now do the allocation

  if (size == 0) {
    return;
  }

#ifdef _WIN32
  if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
    // Named shared-memory path: a pagefile-backed file mapping plus a named
    // event used to coordinate handle release (see WaitForReleaseHandle).
    // Shadowing
    const wchar_t *filename;
    const wchar_t *eventname;
    const std::wstring wFilename = c10::u8u16(filename_);
    const std::wstring wEventname = c10::u8u16(eventname_);
    LARGE_INTEGER hfilesz;

    // Strip the POSIX-style '/' prefix (as produced by
    // NewProcessWideShmHandle) from the Win32 kernel-object names.
    if (filename_[0] == '/') {
      filename = wFilename.c_str() + 1;
      eventname = wEventname.c_str() + 1;
    } else {
      filename = wFilename.c_str();
      eventname = wEventname.c_str();
    }

    hfilesz.QuadPart = size;

    // The creator (EXCLUSIVE) makes the event/mapping; attachers (NOCREATE)
    // open the existing ones.
    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      event_ = CreateEventW(nullptr, FALSE, FALSE, eventname);
    } else if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      event_ = OpenEventW(EVENT_ALL_ACCESS, FALSE, eventname);
    } else {
      TORCH_CHECK(false, "Expected either ALLOCATOR_MAPPED_EXCLUSIVE or ALLOCATOR_MAPPED_NOCREATE");
    }

    if (event_ == nullptr) {
      TORCH_CHECK(false, "Couldn't open shared event: <", eventname, ">, error code: <", GetLastError(), ">");
    }

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      handle_ = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, hfilesz.HighPart, hfilesz.LowPart, filename);
    } else if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      handle_ = OpenFileMappingW(FILE_MAP_ALL_ACCESS, FALSE, filename);
    } else {
      TORCH_CHECK(false, "Expected either ALLOCATOR_MAPPED_EXCLUSIVE or ALLOCATOR_MAPPED_NOCREATE");
    }

    if (handle_ == nullptr) {
      TORCH_CHECK(false, "Couldn't open shared file mapping: <", filename, ">, error code: <", GetLastError(), ">");
    }

    size_ = size;
    base_ptr_ = MapViewOfFile(handle_, FILE_MAP_ALL_ACCESS, 0, 0, size);
    if (!base_ptr_) {
      TORCH_CHECK(false, "Couldn't map view of shared file <", filename, ">, error code: <", GetLastError(), ">");
    }
  } else {
    // Regular-file path (Windows).

    HANDLE hfile;
    HANDLE hmfile;
    LARGE_INTEGER hfilesz;

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      TORCH_CHECK(false, "exclusive file mapping is not supported on Windows");
    }
    if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      TORCH_CHECK(false, "file mapping without creation is not supported on Windows");
    }
    if (flags_ & ALLOCATOR_MAPPED_KEEPFD) {
      TORCH_CHECK(false, "ALLOCATOR_MAPPED_KEEPFD not supported on Windows");
    }
    if (flags_ & ALLOCATOR_MAPPED_FROMFD) {
      TORCH_CHECK(false, "ALLOCATOR_MAPPED_FROMFD not supported on Windows");
    }

    // Shadowing
    const wchar_t *filename;
    const std::wstring wFilename = c10::u8u16(filename_);

    filename = wFilename.c_str();

    /* open file */
    /* FILE_FLAG_RANDOM_ACCESS ? */
    // Any nonzero flags_ means read-write access; flags_ == 0 means a plain
    // read-only, copy-on-write mapping of an existing file.
    if (flags_) {
      hfile = CreateFileW(filename, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_WRITE|FILE_SHARE_READ, 0, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
      if (hfile == INVALID_HANDLE_VALUE) {
        TORCH_CHECK(false, "could not open file <", filename_, "> in read-write mode; error code: <", GetLastError(), ">");
      }
    } else {
      hfile = CreateFileW(filename, GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
      if (hfile == INVALID_HANDLE_VALUE) {
        TORCH_CHECK(false, "could not open file <", filename_, "> in read-only mode; error code: <", GetLastError(), ">");
      }
    }

    if (GetFileSizeEx(hfile, &hfilesz) == 0) {
      TORCH_CHECK(false, "could not get file size: <", filename_, ">; error code: <", GetLastError(), ">");
    }

    if (size > 0) {
      if (size > hfilesz.QuadPart) {
        if (flags_) {
          // Writable mapping: grow the file to the requested size.
          hfilesz.QuadPart = size;
          if (SetFilePointerEx(hfile, hfilesz, NULL, FILE_BEGIN) == 0) {
            CloseHandle(hfile);
            TORCH_CHECK(false, "unable to stretch file <", filename_, "> to the right size; error code: <", GetLastError(), ">", filename_);
          }
          if (SetEndOfFile(hfile) == 0) {
            CloseHandle(hfile);
            TORCH_CHECK(false, "unable to write to file <", filename_, ">; error code: <", GetLastError(), ">");
          }
        } else {
          // Read-only mapping cannot be larger than the file itself.
          CloseHandle(hfile);
          TORCH_CHECK(false, "file <", filename_, "> size <", hfilesz.QuadPart, "> is smaller than the required mapping size <", size, ">; error code: <", GetLastError(), ">");
        }
      }
    } else {
      // size == 0: map the whole existing file.
      size = hfilesz.QuadPart;
    }

    size_ = size; /* if we are here, it must be the right size */

    hfilesz.QuadPart = size_;

    /* get map handle */
    if (flags_) {
      if ( (hmfile = CreateFileMappingW(hfile, NULL, PAGE_READWRITE, hfilesz.HighPart, hfilesz.LowPart, NULL)) == NULL ) {
        TORCH_CHECK(false, "could not create a map on file <", filename_, ">; error code: <", GetLastError(), ">");
      }
    } else {
      if ( (hmfile = CreateFileMappingW(hfile, NULL, PAGE_WRITECOPY, hfilesz.HighPart, hfilesz.LowPart, NULL)) == NULL ) {
        TORCH_CHECK(false, "could not create a map on file <", filename_, ">; error code: <", GetLastError(), ">");
      }
    }

    /* map the stuff */
    if(flags_) {
      base_ptr_ = MapViewOfFile(hmfile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
    } else {
      base_ptr_ = MapViewOfFile(hmfile, FILE_MAP_COPY, 0, 0, 0);
    }

    // The view keeps the mapping alive; file and mapping handles can be
    // closed right away.
    CloseHandle(hfile);
    CloseHandle(hmfile);
  }
#else /* _WIN32 */
  {
    /* open file */
    int fd{-1};
    int flags{}; // shadow

    // Translate ALLOCATOR_MAPPED_* bits into open(2)/shm_open(3) flags.
    if (flags_ & (ALLOCATOR_MAPPED_SHARED | ALLOCATOR_MAPPED_SHAREDMEM)) {
      flags = O_RDWR | O_CREAT;
    } else {
      flags = O_RDONLY;
    }

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      flags |= O_EXCL;
    }
    if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      flags &= ~O_CREAT;
    }

    if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
      if (flags_ & ALLOCATOR_MAPPED_SHARED) {
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if ((fd = open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
          TORCH_CHECK(false, "unable to open file <", filename_, "> in read-write mode: ", strerror(errno), " (", errno, ")");
        }
      } else if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
#ifdef HAVE_SHM_OPEN
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if((fd = shm_open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
          TORCH_CHECK(false, "unable to open shared memory object <", filename_, "> in read-write mode: ", strerror(errno), " (", errno, ")");
        }
#else
        TORCH_CHECK(false, "unable to open file <", filename_, "> in sharedmem mode, shm_open unavailable on this platform");
#endif
      } else {
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if ((fd = open(filename_.c_str(), O_RDONLY)) == -1) {
          TORCH_CHECK(false, "unable to open file <", filename_, "> in read-only mode: ", strerror(errno), " (", errno, ")");
        }
      }
    } else {
      // Caller supplied the descriptor (ALLOCATOR_MAPPED_FROMFD): use it
      // as-is, and do not close it on the error paths below.
      fd = fd_;
    }

    struct stat file_stat{};
    if (fstat(fd, &file_stat) == -1) {
#ifndef STRIP_ERROR_MESSAGES
      int last_err = errno; // TORCH_CHECK drops its message args when stripped
#endif
      if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
        ::close(fd);
      }
      TORCH_CHECK(false, "unable to stat the file <", filename_, ">: ", strerror(last_err), " (", last_err, ")");
    }

    if (size > 0) {
      if (static_cast<int64_t>(size) > file_stat.st_size) {
        if (flags_) {
          // Writable mapping: grow the file, then re-stat to verify.
          if (ftruncate(fd, static_cast<off_t>(size)) == -1) {
            TORCH_CHECK(false, "unable to resize file <", filename_, "> to the right size: ", strerror(errno), " (", errno, ")");
          }
          if (fstat(fd, &file_stat) == -1 || file_stat.st_size < static_cast<int64_t>(size)) {
#ifndef STRIP_ERROR_MESSAGES
            int last_err = errno;
#endif
            ::close(fd);
            TORCH_CHECK(false, "unable to stretch file <", filename_, "> to the right size: ", strerror(last_err), " (", last_err, ")");
          }
/* on macOS write returns with errno 45 (Opperation not supported) when used
 * with a file descriptor obtained via shm_open
 */
#ifndef __APPLE__
          if ((write(fd, "", 1)) != 1) /* note that the string "" contains the '\0' byte ... */ {
#ifndef STRIP_ERROR_MESSAGES
            int last_err = errno;
#endif
            ::close(fd);
            TORCH_CHECK(false, "unable to write to file <", filename_, ">: ", strerror(last_err), " (", last_err, ")");
          }
#endif
        } else {
          ::close(fd);
          TORCH_CHECK(false, "file <", filename_, "> size <",  file_stat.st_size, "> is smaller than the required mapping size <", size, ">");
        }
      }
    } else {
      // size == 0: map the whole existing file.
      size = file_stat.st_size;
    }

    size_ = static_cast<ptrdiff_t>(size); /* if we are here, it must be the right size */

    /* map it */
    if (flags_ & (ALLOCATOR_MAPPED_SHARED | ALLOCATOR_MAPPED_SHAREDMEM)) {
      base_ptr_ = mmap(nullptr, size_, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    } else {
      base_ptr_ = mmap(nullptr, size_, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
    }

    if (base_ptr_ == MAP_FAILED) {
      base_ptr_ = nullptr; /* let's be sure it is NULL */
      TORCH_CHECK(false, "unable to mmap ", size_, " bytes from file <", filename_, ">: ", strerror(errno), " (", errno, ")");
    }

#if !defined(__APPLE__) && !defined(__ANDROID__)
    /* attempt to use larger block size on Linux, which is important for getting better CUDA upload speed */
    posix_fadvise(fd, 0, static_cast<off_t>(size), POSIX_FADV_SEQUENTIAL);
#endif

    if (flags_ & ALLOCATOR_MAPPED_KEEPFD) {
      fd_ = fd;
    } else {
      if (::close(fd) == -1) {
        TORCH_CHECK(false, "Error closing file <", filename_, ">: ", strerror(errno), " (", errno, ")");
      }
      fd_ = -1;
    }

    if (flags_ & ALLOCATOR_MAPPED_UNLINK) {
      // Unlink eagerly: the mapping stays valid until munmap, but the name
      // disappears from the filesystem / shm namespace immediately.
      if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
#ifdef HAVE_SHM_UNLINK
        if (shm_unlink(filename_.c_str()) == -1) {
          TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, " : ", strerror(errno), " (", errno, ")");
        }
#else
        TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, ", shm_unlink not available on platform");
#endif
      } else {
        if (unlink(filename_.c_str()) == -1)
          TORCH_CHECK(false, "could not unlink file ", filename_, " : ", strerror(errno), " (", errno, ")");
      }
    }

    // NOTE(review): this check looks unreachable — MAP_FAILED was already
    // handled (and base_ptr_ reset to nullptr) right after mmap above.
    if (base_ptr_ == MAP_FAILED) {
      TORCH_CHECK(false, "$ Torch: unable to mmap memory: you tried to mmap ", size_/1073741824, " GB.");
    }
  }
#endif
  c10::reportMemoryUsageToProfiler(base_ptr_, size_, 0, size_, c10::Device(c10::DeviceType::CPU));
}
371 
// Convenience constructor: path-based mapping with no caller-supplied fd.
MapAllocator::MapAllocator(c10::string_view filename, int flags, size_t size)
  : MapAllocator(WITH_FD, filename, -1, flags, size)
{}
375 
376 #ifdef _WIN32
// Bookkeeping for deferred release of the Windows shared-memory handles:
// the mapping handle must stay open until the shared event is signalled,
// i.e. until the last attached process has dropped its reference.
struct ReleaseContext {
  HANDLE event;
  HANDLE handle;
  HANDLE wait;
};
// Thread-pool callback fired once when the shared event is signalled;
// re-signals the event (for other waiters), closes the kernel handles,
// unregisters the wait, and frees the context allocated in
// RefcountedMapAllocator::initializeAlloc().
static void CALLBACK WaitForReleaseHandle(PVOID lpParam, BOOLEAN TimerOrWaitFired)
{
  if (lpParam) {
    ReleaseContext *ctx = (ReleaseContext *)lpParam;

    SetEvent(ctx->event);
    CloseHandle(ctx->event);
    CloseHandle(ctx->handle);

    UnregisterWait(ctx->wait);

    delete ctx;
  }
}
396 #endif
397 
// Idempotently tear down the mapping: unmap the view, close any retained
// descriptor/handle, and (POSIX) unlink a shared-memory name that was not
// caller-owned (FROMFD) or already unlinked at construction (UNLINK).
void MapAllocator::close() {
  if (closed_) {
    return;
  }
  closed_ = true;
  if (base_ptr_ == nullptr) {
    // Nothing was ever mapped (size == 0 construction).
    return;
  }
#ifdef _WIN32
  if ((flags_ & ALLOCATOR_MAPPED_KEEPFD) || (flags_ & ALLOCATOR_MAPPED_SHAREDMEM))
    CloseHandle(handle_);
  if(UnmapViewOfFile(base_ptr_) == 0)
    TORCH_CHECK(false, "could not unmap the shared memory file");
#else /* _WIN32 */
  if (flags_ & ALLOCATOR_MAPPED_KEEPFD) {
    // We kept ownership of the descriptor at construction; release it now.
    if (::close(fd_) == -1) {
      TORCH_CHECK(false, "could not close file descriptor ", fd_, " :", strerror(errno), " (", errno, ")" );
    }
  }

  if (munmap(base_ptr_, size_)) {
    TORCH_CHECK(false, "could not unmap the shared memory file: ", strerror(errno), " (", errno, ")");
  }

  if (!(flags_ & (ALLOCATOR_MAPPED_FROMFD | ALLOCATOR_MAPPED_UNLINK))) {
    if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
#ifdef HAVE_SHM_UNLINK
      if (shm_unlink(filename_.c_str()) == -1) {
        TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, " : ", strerror(errno), " (", errno, ")");
      }
#else
      TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, ", shm_unlink not available on platform");
#endif
    }
  }
#endif /* _WIN32 */
}
435 
436 #else /* defined(_WIN32) || defined(HAVE_MMAP) */
437 
// Stub for platforms with neither Windows file mapping nor mmap: any
// attempt to construct a file-backed allocator fails loudly.
MapAllocator::MapAllocator(c10::string_view filename, int flags, size_t size) {
  TORCH_CHECK(false, "file mapping not supported on your system");
}
441 
// Stub for platforms without file-mapping support (fd-based variant).
MapAllocator::MapAllocator(WithFd, c10::string_view filename, int fd, int flags, size_t size) {
  TORCH_CHECK(false, "file mapping not supported on your system");
}
445 
close()446 void MapAllocator::close() { }
447 
448 #endif
449 
450 #if (defined(_WIN32) || defined(HAVE_MMAP)) && defined(AT_ATOMIC_IPC_REFCOUNT)
451 
RefcountedMapAllocatorArgCheck(int flags)452 RefcountedMapAllocatorArgCheck::RefcountedMapAllocatorArgCheck(int flags) {
453   if (flags & ALLOCATOR_MAPPED_FROMFD) {
454     TORCH_CHECK(false, "RefcountedMapAllocator doesn't support ALLOCATOR_MAPPED_FROMFD flag");
455   }
456   if (flags & ALLOCATOR_MAPPED_KEEPFD) {
457     TORCH_CHECK(false, "RefcountedMapAllocator doesn't support ALLOCATOR_MAPPED_KEEPFD flag");
458   }
459   if (flags & ALLOCATOR_MAPPED_UNLINK) {
460     TORCH_CHECK(false, "RefcountedMapAllocator doesn't support ALLOCATOR_MAPPED_UNLINK flag");
461   }
462   if (!(flags & ALLOCATOR_MAPPED_SHAREDMEM)) {
463     TORCH_CHECK(false, "RefcountedMapAllocator requires ALLOCATOR_MAPPED_SHAREDMEM flag");
464   }
465 }
466 
// Path-based refcounted mapping: over-allocates by map_alloc_alignment
// bytes so the MapInfo refcount header fits in front of the user data
// (see data(), which skips past it).
RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags)
  , MapAllocator(filename, flags, size + map_alloc_alignment) {

    initializeAlloc();
}
RefcountedMapAllocator(WithFd,const char * filename,int fd,int flags,size_t size)473 RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
474   : RefcountedMapAllocatorArgCheck(flags)
475   , MapAllocator(WITH_FD, filename, flags, fd, size + map_alloc_alignment) {
476 
477     initializeAlloc();
478 }
479 
// Set up the shared refcount header at the front of the freshly created
// (or attached) mapping.  The creator (EXCLUSIVE) placement-constructs the
// atomic with count 1; attachers increment the existing count.
void RefcountedMapAllocator::initializeAlloc() {
  TORCH_CHECK(base_ptr_, "base_ptr_ is null");
  MapInfo *map_info = (MapInfo*)base_ptr_;

#ifdef _WIN32
  // Keep the mapping/event handles alive until the shared event fires;
  // WaitForReleaseHandle then closes them and frees this context.
  ReleaseContext* r_ctx = new ReleaseContext;
  r_ctx->handle = handle_;
  r_ctx->event = event_;
  r_ctx->wait = NULL;
  BOOL can_wait = RegisterWaitForSingleObject(&r_ctx->wait, event_, WaitForReleaseHandle, (PVOID)r_ctx, INFINITE, WT_EXECUTEONLYONCE);
  TORCH_CHECK(can_wait, "Couldn't register wait on event, error code: <", GetLastError(), ">");
#endif

  if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
    new (&map_info->refcount) std::atomic<int>(1);
  } else {
    map_info->refcount++;
  }
}
499 
// Drop this process's reference to the shared segment.  The last referent
// unlinks the backing shared-memory name (POSIX) or signals the release
// event (Windows); either way, the local view is then unmapped.
void RefcountedMapAllocator::close() {
  if (closed_) {
    return;
  }
  closed_ = true;

  void* data = base_ptr_;

#ifdef _WIN32
  MapInfo *info = (MapInfo*)data;
  if (--info->refcount == 0) {
    // Last reference: wake WaitForReleaseHandle to free the kernel handles.
    SetEvent(event_);
  }
  if(UnmapViewOfFile(data) == 0) {
    TORCH_CHECK(false, "could not unmap the shared memory file");
  }
#else /* _WIN32 */

  MapInfo *info = (MapInfo*)(data);
  if (--info->refcount == 0) {
#ifdef HAVE_SHM_UNLINK
    if (shm_unlink(filename_.c_str()) == -1) {
      TORCH_CHECK(false, "could not unlink the shared memory file ", filename_);
    }
#else
    TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, ", shm_unlink not available on platform");
#endif /* HAVE_SHM_UNLINK */
  }
  // `info` aliases base_ptr_, the start of the mapping.
  if (munmap(info, size_)) {
    TORCH_CHECK(false, "could not unmap the shared memory file ", filename_);
  }
#endif /* _WIN32 */
}
533 
incref()534 void RefcountedMapAllocator::incref()
535 {
536   MapInfo *map_info = static_cast<MapInfo*>(base_ptr_);
537   ++map_info->refcount;
538 }
539 
decref()540 int RefcountedMapAllocator::decref()
541 {
542   MapInfo *map_info = static_cast<MapInfo*>(base_ptr_);
543   return --map_info->refcount == 0;
544 }
545 
546 #else
547 
548 
RefcountedMapAllocatorArgCheck(int flags)549 RefcountedMapAllocatorArgCheck::RefcountedMapAllocatorArgCheck(int flags) {}
550 
// Stub: refcounted mappings need lock-free atomics plus mmap/Win32 support.
RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags),
    MapAllocator(filename, flags, size + map_alloc_alignment)
{
  TORCH_CHECK(false, "refcounted file mapping not supported on your system");
}
557 
RefcountedMapAllocator(WithFd,const char * filename,int fd,int flags,size_t size)558 RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
559   : RefcountedMapAllocatorArgCheck(flags),
560     MapAllocator(WITH_FD, filename, flags, fd, size + map_alloc_alignment)
561 {
562   TORCH_CHECK(false, "refcounted file mapping not supported on your system");
563 }
564 
initializeAlloc()565 void RefcountedMapAllocator::initializeAlloc() {}
566 
close()567 void RefcountedMapAllocator::close() {}
568 
569 #endif
570 
// Context deleter installed into DataPtrs built by MapAllocator::makeDataPtr;
// also serves as the identity tag checked by fromDataPtr.
static void deleteMapAllocator(void* ptr) {
  delete static_cast<MapAllocator*>(ptr);
}
574 
// Context deleter for DataPtrs built by RefcountedMapAllocator::makeDataPtr.
static void deleteRefcountedMapAllocator(void* ptr) {
  delete static_cast<RefcountedMapAllocator*>(ptr);
}
578 
// Recover the MapAllocator context from a DataPtr; yields nullptr when the
// DataPtr was not produced by makeDataPtr (deleter mismatch).
MapAllocator* MapAllocator::fromDataPtr(const at::DataPtr& dptr) {
  return dptr.cast_context<MapAllocator>(&deleteMapAllocator);
}
582 
// Same as MapAllocator::fromDataPtr, keyed on the refcounted deleter.
RefcountedMapAllocator* RefcountedMapAllocator::fromDataPtr(const at::DataPtr& dptr) {
  return dptr.cast_context<RefcountedMapAllocator>(&deleteRefcountedMapAllocator);
}
586 
makeDataPtr(c10::string_view filename,int flags,size_t size,size_t * actual_size_out)587 at::DataPtr MapAllocator::makeDataPtr(c10::string_view filename, int flags, size_t size, size_t* actual_size_out) {
588   auto* context = new MapAllocator(filename, flags, size);
589   if (actual_size_out) *actual_size_out = context->size();
590   return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
591 }
592 
makeDataPtr(WithFd,const char * filename,int fd,int flags,size_t size,size_t * actual_size_out)593 at::DataPtr MapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
594   auto* context = new MapAllocator(WITH_FD, filename, fd, flags, size);
595   if (actual_size_out) *actual_size_out = context->size();
596   return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
597 }
598 
makeDataPtr(const char * filename,int flags,size_t size,size_t * actual_size_out)599 at::DataPtr RefcountedMapAllocator::makeDataPtr(const char *filename, int flags, size_t size, size_t* actual_size_out) {
600   auto* context = new RefcountedMapAllocator(filename, flags, size);
601   if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
602   return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
603 }
604 
makeDataPtr(WithFd,const char * filename,int fd,int flags,size_t size,size_t * actual_size_out)605 at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
606   auto* context = new RefcountedMapAllocator(WITH_FD, filename, fd, flags, size);
607   if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
608   return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
609 }
610 
data() const611 void* RefcountedMapAllocator::data() const {
612   return static_cast<void*>(static_cast<char*>(base_ptr_) + map_alloc_alignment);
613 }
614 
// Release the mapping and report the deallocation to the profiler.  close()
// is explicitly qualified to avoid virtual dispatch during destruction
// (derived parts are already destroyed at this point).
MapAllocator::~MapAllocator() {
  MapAllocator::close();
  c10::reportMemoryUsageToProfiler(base_ptr_, -size_, 0, 0, c10::Device(c10::DeviceType::CPU));
}
619 
620 }  // namespace at
621