/**
Implements a range algorithm that caches element values. It turns
`InputRange`s into `ForwardRange`s as well as providing `opIndex`.
*/

module alid.cached;

import alid.errornogc : NogcError;
import alid.circularblocks : CircularBlocks;
import core.memory : pageSize;
import std.range : ElementType;

/**
A _range algorithm that caches the elements of a _range by evaluating them
only once.

Makes a new cache object that will be kept alive collectively with the
slices that it produces, the first of which is returned.

As a natural benefit of caching, the returned _range is a `ForwardRange`
even if the source _range is an `InputRange`. Although the returned _range
provides `opIndex`, it is a `RandomAccessRange` only if the source _range
`hasLength`.

The version that takes buffers may never need to allocate memory if the
number of elements never exceeds the capacity of the underlying
`CircularBlocks`. (See `CircularBlocks`.)

This _range may mutate the underlying element blocks as needed, even inside
its `front() inout` function. For that reason, the behavior of some of its
operations is undefined if the provided `buffers` live in read-only memory.

Params:

    range = the source _range to cache the elements of

    heapBlockCapacity = the minimum capacity to use for `CircularBlocks`,
                        which is used as storage for the _cached elements;
                        the default value attempts to use one page of memory

    buffers = the _buffers to be used by the `CircularBlocks` member

Returns:

    A _range that participates in the collective ownership of the _cached
    elements

Bugs:

    Although `opIndex` is provided, `isRandomAccessRange` produces `false`
    because this implementation is not a `BidirectionalRange` at this time.

Todo:

    Add `opApplyReverse` support and `BidirectionalRange` functions

*/
auto cached(R)(R range, size_t heapBlockCapacity = pageSize / ElementType!R.sizeof)
{
    if (heapBlockCapacity == 0)
    {
        heapBlockCapacity = 100;
    }

    auto elements = CircularBlocks!(ElementType!R)(heapBlockCapacity);
    auto theCacheObject = new ElementCache!R(range, elements, heapBlockCapacity);

    // This first slice starts with offset 0
    enum elementOffset = 0;
    return theCacheObject.makeSlice(elementOffset);
}

/// Ditto
auto cached(R, size_t N, size_t M)(R range, ref ubyte[N][M] buffers)
{
    auto elements = CircularBlocks!(ElementType!R)(buffers);
    enum heapBlockCapacity = N;
    auto theCacheObject = new ElementCache!R(range, elements, heapBlockCapacity);

    enum elementOffset = 0;
    return theCacheObject.makeSlice(elementOffset);
}

///
unittest
{
    // In the following typical usage, even though `slide(2)` would visit most
    // elements more than once, `cached` guarantees that the lambda function
    // executed by `map` will be executed only once per range element.

    auto r = iota(4)
             .map!(i => i * i)    // [0, 1, 4, 9]
             .cached
             .slide(2)
             .array;

    // There are 3 sliding windows of width 2 over 4 elements
    r.length.shouldBe(3);
    r[0].shouldBe([0, 1]);
    r[1].shouldBe([1, 4]);
    r[2].shouldBe([4, 9]);
}

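unittest
{
    // A quick check of the ForwardRange claim in the documentation above.
    // This sketch assumes std.range.generate, whose result is an InputRange
    // but not a ForwardRange; caching it is expected to yield a range that
    // passes isForwardRange.
    import std.range : generate, isForwardRange, take;

    auto source = generate!(() => 42).take(3);
    static assert (!isForwardRange!(typeof(source)));

    auto c = source.cached;
    static assert ( isForwardRange!(typeof(c)));
    c.shouldBe([42, 42, 42]);
}
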
///
unittest
{
    // Random access over an InputRange

    import std.algorithm : splitter;
    import std.range : popFrontN;

    // Make an InputRange
    auto source()
    {
        auto lines = "monday,tuesday,wednesday,thursday,friday,saturday,sunday";
        return lines.splitter(',');
    }

    // The source range does not provide random access:
    static assert(!__traits(compiles, source[0]));

    // The cached range does:
    auto c = source.cached;
    assert(c[2] == "wednesday");    // Note out-of-order indexing
    assert(c[1] == "tuesday");      // This element is ready thanks to the previous line

    c.popFrontN(3);

    // Now index 0 is another element
    assert(c[0] == "thursday");
}

///
unittest
{
    // This version uses an existing buffer. It may never need to allocate
    // additional heap memory.

    ubyte[1024][2] buffers;

    auto r = iota(4)
             .map!(i => i * i)
             .cached(buffers)    // Will use the provided buffers
             .slide(2)
             .array;

    r.length.shouldBe(3);
}

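unittest
{
    // A minimal sketch of the "may never need to allocate" claim above,
    // assuming that CircularBlocks touches the heap only when the provided
    // buffers run out of room. Four ints fit easily into the buffers, so no
    // heap block is expected.
    ubyte[1024][2] buffers;

    auto r = iota(4).map!(i => i * i).cached(buffers);
    consume(r);

    r.stats.heapAllocations.shouldBe(0);
}
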
private mixin NogcError!"cached";

/*
This is the implementation of the actual cache, elements of which will be
shared by potentially multiple CachedRange ranges.
*/
private struct ElementCache(R)
{
    import std.array : empty;
    import std.range : hasLength, ElementType;
    import std.traits : isUnsigned;

    // Useful aliases and values
    alias ET = ElementType!R;
    alias invalidSliceOffset = CachedRange!ElementCache.invalidSliceOffset;
    enum rangeHasLength = hasLength!R;

    R range;                       // The source range
    CircularBlocks!ET elements;    // The cached elements
    size_t[] sliceOffsets;         // The beginning indexes of slices into 'elements'

    size_t liveSliceCount;         // The number of slices that are still being used
    size_t dropLeadingAttempts;    // The number of times we considered but did
                                   // not go with dropping leading elements

    size_t minElementsToDrop;      // The number of elements we consider large
                                   // enough to consider for dropping

    Stats stats_;

    @disable this(this);
    @disable this(ref const(typeof(this)));

    this(R range, ref CircularBlocks!ET elements, size_t heapBlockCapacity)
    {
        import std.algorithm : move;

        this.range = range;
        this.elements = move(elements);

        // Make it the same as the heap capacity because it is likely that the
        // underlying circular buffer will keep the elements alive below this
        // figure anyway.
        this.minElementsToDrop = heapBlockCapacity;
    }

    Stats stats() const
    {
        Stats result = stats_;
        // Unlike the other statistics figures, this value is not kept
        // up-to-date by us
        result.heapAllocations = elements.heapAllocations;
        return result;
    }

    // Whether the parameter is valid as a slice id
    bool isValidId_(in size_t id) const
    {
        return id < sliceOffsets.length;
    }

    // mixin string for throwing an Error
    enum idError_ = `cachedError("Invalid id", id)`;

    // Whether the specified slice is empty
    bool emptyOf(in size_t id)
    in (isValidId_(id), mixin (idError_))
    {
        if (sliceOffsets[id] < elements.length)
        {
            return false;
        }

        if (range.empty)
        {
            return true;
        }

        const expanded = expandAsNeeded(id, 1);
        return !expanded;
    }

    // The front element of the specified slice
    auto frontOf(in size_t id) inout
    in (isValidId_(id), mixin (idError_))
    {
        // HACK: The following cast is undefined behavior when this object
        // actually is placed in read-only memory. At least we have a note in
        // the comments of cached() about read-only memory. Still, we don't
        // expect this to be the case because a range object such as this one
        // is meant to be mutable to be consumed.
        (cast()this).expandAsNeeded(id, 1);
        return elements[sliceOffsets[id]];
    }

    // Pop an element from the specified slice
    void popFrontOf(in size_t id)
    in (isValidId_(id), mixin (idError_))
    {
        // Trivially increment the offset.
        ++sliceOffsets[id];

        if (sliceOffsets[id] >= minElementsToDrop)
        {
            /*
            This slice has popped enough elements for us to consider dropping
            leading elements. But we don't want to rush into it yet because
            even determining whether there are unused elements or not incurs
            some cost.

            We will apply the heuristic rule of ensuring this condition has
            been seen at least as many times as there are live slices. (Even a
            single slice is sufficient to hold on to the leading elements.)
            */
            ++dropLeadingAttempts;

            if (dropLeadingAttempts >= liveSliceCount)
            {
                // We waited long enough
                dropLeadingAttempts = 0;
                const minIndex = unreferencedLeadingElements();

                if (minIndex)
                {
                    // There are leading elements that are not being used
                    // anymore
                    dropLeadingElements(minIndex);

                    // TODO: Commenting out for now because we can't be sure of
                    //       usage patterns to decide to remove blocks from the
                    //       underlying storage. (See a related 'version' in
                    //       one of the unittest blocks.)
                    version (RUN_COMPACTION)
                    {
                        // Since we've dropped elements, let's also consider
                        // dropping unused blocks from the underlying circular
                        // buffer.
                        const occupancy = elements.heapBlockOccupancy;

                        if (occupancy.occupied &&
                            (occupancy.total > occupancy.occupied * 4))
                        {
                            ++stats_.compactionRuns;
                            const removedBlocks = elements.compact();
                            stats_.removedBlocks += removedBlocks;
                        }
                    } // version (RUN_COMPACTION)
                }
            }
        }
    }

    // The specified element (index) of the specified slice (id)
    auto getElementOf(in size_t id, in size_t index)
    in (isValidId_(id), mixin (idError_))
    {
        expandAsNeeded(id, index + 1);
        return elements[sliceOffsets[id] + index];
    }

    // Make and return a new range object that has the same beginning index as
    // the specified slice
    auto saveOf(in size_t id)
    in (isValidId_(id), mixin (idError_))
    {
        return makeSlice(sliceOffsets[id]);
    }

    static if (rangeHasLength)
    {
        // The length of the specified slice
        auto lengthOf(in size_t id)
        in (isValidId_(id), mixin (idError_))
        {
            return range.length + elements.length - sliceOffsets[id];
        }
    }

    // Create and return a RefCounted!CachedRange that initially references all
    // currently cached elements beginning from the specified offset (element
    // index)
    auto makeSlice(in size_t offset)
    {
        import std.algorithm : find, move;
        import std.typecons : refCounted;

        auto id = size_t.max;

        if (sliceOffsets.length == liveSliceCount)
        {
            // There is no unused spot; add one.
            id = sliceOffsets.length;
            sliceOffsets ~= offset;
        }
        else
        {
            // Find an unused spot in the slice offsets array.
            id = 0;
            foreach (so; sliceOffsets)
            {
                if (so == invalidSliceOffset)
                {
                    break;
                }
                ++id;
            }
            assert(id < sliceOffsets.length, "Inconsistent 'liveSliceCount'");
            sliceOffsets[id] = offset;
        }

        ++liveSliceCount;

        // Return a reference counted object so that its destructor will
        // "unregister" it by removing it from the list of slices.
        auto slice = CachedRange!ElementCache(&this, id);
        return refCounted(move(slice));
    }

    // Remove the specified slice; it does not own any elements anymore
    void removeSlice(in size_t id)
    in (isValidId_(id), mixin (idError_))
    {
        sliceOffsets[id] = invalidSliceOffset;
        --liveSliceCount;
    }

    // Determine the number of leading elements that are not being referenced
    // (used) by any slice
    size_t unreferencedLeadingElements()
    {
        auto minOffset = size_t.max;

        foreach (offset; sliceOffsets)
        {
            // There is no reason to continue because 0 is the absolute minimum
            // for unsigned types. But let's verify our assumption.
            static assert(isUnsigned!(typeof(offset)));
            if (offset == minOffset.min)
            {
                return offset;
            }

            if (offset < minOffset)
            {
                minOffset = offset;
            }
        }

        return minOffset;
    }

    // Drop the specified number of leading elements from the cache
    void dropLeadingElements(size_t n)
    {
        import std.algorithm : each, filter;

        // Drop the unreferenced elements
        elements.removeFrontN(n);

        // Adjust the starting indexes of all slices accordingly.
        sliceOffsets
            .filter!((ref i) => i != invalidSliceOffset)
            .each!((ref i) => i -= n);

        ++stats_.leadingDropRuns;
        stats_.droppedElements += n;
    }

    // Ensure that the specified slice has elements as needed. Returns 'true'
    // if expanded.
    auto expandAsNeeded(in size_t id, in size_t needed)
    in (isValidId_(id), mixin (idError_))
    in (sliceOffsets[id] <= elements.length,
        cachedError("Invalid element index", id, sliceOffsets[id], elements.length))
    {
        import std.range : front, popFront;

        const ready = elements.length - sliceOffsets[id];
        if (ready >= needed)
        {
            return false;
        }

        for (size_t missing = needed - ready; missing; --missing)
        {
            // Transfer one element from the source range to the cache
            elements.emplaceBack(range.front);
            range.popFront();
        }

        return true;
    }
}

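unittest
{
    // A sketch exercising the drop-leading heuristic of popFrontOf above:
    // with a single live slice popping far beyond the heap block capacity,
    // leading elements should have been dropped at least once. The specific
    // figures here are arbitrary test values.
    enum heapBlockCapacity = 16;

    auto r = iota(1_000).cached(heapBlockCapacity);
    consume(r);

    assert(r.stats.leadingDropRuns > 0);
    assert(r.stats.droppedElements > 0);
}
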
/**
Statistics about the operation of `ElementCache` as well as the underlying
`CircularBlocks` that it uses for element storage.
*/
static struct Stats
{
    /// Number of blocks allocated from the heap by the underlying
    /// `CircularBlocks` storage
    size_t heapAllocations;

    /// Number of times the algorithm for dropping leading elements is executed
    size_t leadingDropRuns;

    /// Number of leading elements dropped
    size_t droppedElements;

    /// Number of times `CircularBlocks.compact` is executed
    size_t compactionRuns;

    /// Number of blocks removed due to compaction
    size_t removedBlocks;

    /// String representation of the statistics
    void toString(scope void delegate(in char[]) sink) const
    {
        import std.format : formattedWrite;

        sink.formattedWrite!"heap blocks allocated           : %s\n"(heapAllocations);
        sink.formattedWrite!"leading-element-drop executions : %s\n"(leadingDropRuns);
        sink.formattedWrite!"total elements dropped          : %s\n"(droppedElements);
        sink.formattedWrite!"block compaction executions     : %s\n"(compactionRuns);
        sink.formattedWrite!"blocks removed due to compaction: %s\n"(removedBlocks);
    }
}

version (unittest)
{
    import alid.test : shouldBe;
    import std.algorithm : equal, filter, map;
    import std.array : array;
    import std.range : iota, slide;

    private void consume(A)(ref A a)
    {
        while (!a.empty)
        {
            a.popFront();
        }
    }
}

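unittest
{
    // A small sketch of the collective ownership described in the cached()
    // documentation: a range and its saved copy share one cache, so the
    // side-effecting map lambda below should run only once per element even
    // though both ranges are consumed fully.
    size_t evaluations;

    auto a = iota(5).map!((i) { ++evaluations; return i; }).cached;
    auto b = a.save();

    consume(a);
    consume(b);

    evaluations.shouldBe(5);
}
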
/**
Represents a `ForwardRange` (and a `RandomAccessRange` when possible) over
the cached elements of a source range. Provides the range algorithm
`length()` only if the source range does so.

Params:

    EC = the template instance of the `ElementCache` template that this
         range provides access to the elements of. Users are expected to
         call one of the `cached` functions, which set this parameter
         automatically.
*/
// All pre-condition checks are handled by the dispatched ElementCache member
// functions.
struct CachedRange(EC)
{
private:

    EC * elementCache;
    size_t id = size_t.max;

    enum invalidSliceOffset = size_t.max;

    // We can't allow copying objects of this type because they are intended to
    // be reference counted. Users must call save() instead.
    @disable this(this);

    // Takes the ElementCache object that this is a slice of, and the assigned
    // id of this slice
    this(EC * elementCache, in size_t id)
    {
        this.elementCache = elementCache;
        this.id = id;
    }

public:

    /**
    Unregisters this slice from the actual `ElementCache` storage.
    */
    ~this()
    {
        // Prevent running on an .init state, which move() leaves behind
        if (elementCache !is null)
        {
            elementCache.removeSlice(id);
        }
    }

    /**
    Return statistics about the operation of `ElementCache` as well as the
    underlying `CircularBlocks` that it uses for element storage
    */
    Stats stats() const
    {
        return elementCache.stats;
    }

    /**
    Support for `foreach` loops
    */
    // This explicit support is needed because, although RefCounted!CachedRange
    // implicitly converts to CachedRange, which would be a range, the result
    // would have to be copied into the foreach loop, and this type does not
    // allow copying.
    int opApply(int delegate(const(EC.ET)) func)
    {
        while (!empty)
        {
            int result = func(front);
            if (result)
            {
                return result;
            }
            popFront();
        }

        return 0;
    }

    /// Ditto
    int opApply(int delegate(size_t, const(EC.ET)) func)
    {
        size_t counter;
        while (!empty)
        {
            int result = func(counter, front);
            if (result)
            {
                return result;
            }
            popFront();
            ++counter;
        }

        return 0;
    }

    // InputRange functions

    /// Whether the range is empty
    auto empty()
    {
        return elementCache.emptyOf(id);
    }

    /// The front element of the range
    auto front() inout
    {
        return elementCache.frontOf(id);
    }

    /// Remove the front element from the range
    void popFront()
    {
        elementCache.popFrontOf(id);
    }

    // ForwardRange function

    /// Make and return a new `ForwardRange` object that is the equivalent of
    /// this range object
    auto save()
    {
        return elementCache.saveOf(id);
    }

    // RandomAccessRange function

    /**
    Return a reference to an element

    The algorithmic complexity of this function is amortized *O(1)* because
    it might need to cache elements between the currently available last
    element and the element about to be accessed with the specified index.

    Params:

        index = the _index of the element to return
    */
    auto opIndex(in size_t index)
    {
        return elementCache.getElementOf(id, index);
    }

    // An optional length

    static if (EC.rangeHasLength)
    {
        /**
        Number of elements of this range

        This function is available only if the source range provides it.
        */
        auto length()
        {
            return elementCache.lengthOf(id);
        }
    }
}

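unittest
{
    // The Bugs section of cached() notes that isRandomAccessRange is not
    // satisfied despite opIndex because BidirectionalRange support is
    // missing. A quick verification of that documented limitation.
    import std.range : isRandomAccessRange;

    auto r = iota(10).cached;
    static assert (!isRandomAccessRange!(typeof(r)));

    r[3].shouldBe(3);    // opIndex itself works
}
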
unittest
{
    // Should compile and work with D arrays

    [1, 2].cached.shouldBe([1, 2]);
}

unittest
{
    // 0 as heapBlockCapacity should work

    auto r = iota(10).cached(0);
    r[5].shouldBe(5);
    r.shouldBe(iota(10));
}

unittest
{
    // InputRange, ForwardRange, save, and .length() tests

    import std.range : take;

    auto r = iota(1, 11).cached;
    assert(!r.empty);
    r.front.shouldBe(1);
    r.length.shouldBe(10);

    r.popFront();
    assert(!r.empty);
    r.front.shouldBe(2);
    r.length.shouldBe(9);

    r.take(4).shouldBe(iota(2, 6));
    assert(!r.empty);
    r.front.shouldBe(6);
    r.length.shouldBe(5);

    auto r2 = r.save();
    r2.shouldBe(iota(6, 11));
    assert(r2.empty);
    r2.length.shouldBe(0);

    r.shouldBe(iota(6, 11));
    assert(r.empty);
    r.length.shouldBe(0);
}

unittest
{
    // Iteration tests

    import std.traits : isIterable;

    enum begin = 1;
    auto r = iota(begin, 11).cached;
    static assert (isIterable!(typeof(r)));

    auto r2 = r.save();

    foreach (e; r)
    {
        if (e == 3)
        {
            break;
        }
    }
    r.front.shouldBe(3);

    // Fully consume for code coverage
    foreach (e; r)
    {
    }
    assert(r.empty);

    foreach (i, e; r2)
    {
        e.shouldBe(i + begin);

        if (e == 7)
        {
            break;
        }
    }
    r2.front.shouldBe(7);

    // Fully consume for code coverage
    foreach (i, e; r2)
    {
    }
    assert(r2.empty);
}

unittest
{
    // RandomAccessRange tests

    import std.algorithm : each;

    auto r = iota(0, 7).cached;
    iota(5, 0, -1).each!(i => r[i].shouldBe(i));
}

unittest
{
    // hasLength tests

    import std.range : hasLength;

    // Has length because iota has length
    static assert ( hasLength!(typeof(iota(10).cached)));

    // Does not have length because filter cannot have length
    static assert (!hasLength!(typeof(iota(10).filter!(i => i % 2).cached)));
}

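unittest
{
    // A sketch of the amortized caching behavior that opIndex documents:
    // accessing index k is expected to evaluate only the first k + 1 source
    // elements, not the whole range.
    size_t evaluations;

    auto r = iota(100).map!((i) { ++evaluations; return i; }).cached;

    r[5].shouldBe(5);
    evaluations.shouldBe(6);
}
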
unittest
{
    // Although it is discouraged to have side effects in a map lambda, let's
    // test with such a map to prove that the lambda is executed only once per
    // element.

    import std.algorithm : find;

    enum n = 42;
    enum nonExistingValue = n + 1;
    size_t counter = 0;

    // Ensure all operations will be evaluated by searching for
    // nonExistingValue in all elements of all sliding window ranges.
    const found = iota(n)
                  .map!((i)
                  {
                      ++counter;
                      return i;
                  })
                  .cached
                  .slide(3)
                  .find!(window => !window.find(nonExistingValue).empty);

    // Validate the test
    assert(found.empty);

    counter.shouldBe(n);
}

unittest
{
    // Test that map expressions used by zip are cached as filter works on
    // them.

    import std.range : zip;

    enum n = 10;
    size_t count;

    auto r = zip(iota(0, n)
                 .map!(_ => ++count))
             .cached
             .filter!(t => t[0] == t[0]);    // Normally, multiple executions
    consume(r);

    count.shouldBe(n);
}

unittest
{
    // Out-of-bounds access should throw

    import std.exception : assertThrown;

    assertThrown!Error(
        {
            auto r = iota(5).cached;
            int result = 0;

            // More accesses than elements available
            foreach (i; 0 .. 10)
            {
                result += r.front;
                r.popFront();
            }
        }());
}

unittest
{
    // The following is the code that exposed a difference between Rust's
    // tuple_window and D's slide. tuple_window uses once() in its
    // implementation so the generator lambda is executed only once per
    // element. D's slide calls map's front multiple times. .cached is
    // supposed to prevent that.

    static struct Data
    {
        int * ptr;
        size_t capacity;
    }

    enum n = 1_000;

    int[] v;

    const r = iota(n)
              .map!((i)
              {
                  v ~= i;
                  return Data(v.ptr, v.capacity);
              })
              .cached       // The lambda should be executed once per object
              .slide(2)     // [0,1], [1,2], [2,3], etc.
              .filter!((t)
              {
                  const prev = t.front.ptr;
                  t.popFront();
                  const curr = t.front.ptr;
                  return prev != curr;
              })
              .map!(t => t.front.capacity)
              .array;

    v.length.shouldBe(n);

    // Make 'dub lint' happy by using the variable
    assert(r.length == r.length);
}

unittest
{
    // A single slice should be sufficient to keep all elements alive

    import std.algorithm : each;
    import std.conv : to;

    // Picking a large number of elements along with a very small capacity to
    // cause at least one consideration of dropping the front elements
    enum n = 10_000;
    enum heapBlockCapacity = 100;

    auto r = iota(n).cached(heapBlockCapacity);

    {
        // Create saved states of the range
        auto saveds = iota(4).map!(_ => r.save()).array;

        // This operation will ensure populating the element cache
        consume(r);

        // No leading element should have been dropped
        assert(saveds[0].length == n);

        // Consume all saved states but one
        foreach (ref s; saveds[1 .. $])
        {
            consume(s);
        }

        // Still, no leading element should have been dropped
        assert(saveds[0].length == n);

        // Finally, there should be dropping of leading elements
        consume(saveds[0]);
    }

    // We expect non-zero figures
    const stats = r.stats;
    assert(stats.heapAllocations);
    assert(stats.leadingDropRuns);
    assert(stats.droppedElements);
    version (RUN_COMPACTION)
    {
        assert(stats.compactionRuns);
        assert(stats.removedBlocks);
    }
    else
    {
        assert(stats.compactionRuns == 0);
        assert(stats.removedBlocks == 0);
    }

    // For code coverage
    stats.to!string;
}

unittest
{
    // Works with cycle, which has a 'const' front():

    import std.range : cycle, take;

    iota(10).cached.cycle.take(30);
}
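
unittest
{
    // A hedged sketch of the buffers overload when capacity runs out: with
    // room for only a handful of ints and a saved slice keeping every element
    // alive, the cache is expected to fall back to allocating heap blocks.
    ubyte[int.sizeof * 4][1] buffers;

    auto r = iota(100).cached(buffers);
    auto s = r.save();    // Holds offset 0, preventing any drops

    consume(r);

    s.length.shouldBe(100);
    assert(r.stats.heapAllocations > 0);
}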