-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathjakel101_shaders.json
More file actions
3015 lines (3015 loc) · 301 KB
/
jakel101_shaders.json
File metadata and controls
3015 lines (3015 loc) · 301 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
{
"userName": "jakel101",
"date": "2025-09-10T23:32:03.814290+00:00",
"numShaders": 58,
"shaders": [
{
"ver": "0.1",
"info": {
"id": "wc2yWh",
"date": "1756592684",
"viewed": 90,
"name": "glass ball bouncing on heightmap",
"username": "jakel101",
"description": "I wanted to try 3D physics ontop of my heightmap shader... first goal is a single ball - but maybe we can do a 2nd one. (one glass and one light?)",
"likes": 5,
"published": 3,
"flags": 32,
"usePreview": 0,
"tags": [
"25d",
"heightmap",
"physics",
"pathracing"
],
"hasliked": 0,
"retrieved": "2025-09-10T23:32:03.814290+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents \\_%_/\n\n/* Image pass shader to draw a texture/buffer/input as a heightmap\n* with some pathtracing as pillars of pixels.\n* Meant to be used in multiple projects and therefore\n* easily configurable at the top with a few macros\n*\n* selflink: https://www.shadertoy.com/view/M3VBWt\n* other projects using this shader/framework: https://www.shadertoy.com/playlist/mX2cD3\n* \n* work in progress:\n* todo(ideas):\n* - infinite/LOD tiles?\n* - DDA like traversal\n* - cleanup as usual\n* feedback/improvements welcome here.\n*/\n\n\n# define PI 3.141592653\n\n\n# define CELLS ivec2(64)\n//# define CELLS ivec2(3)\n\n// unsure yet where to bring this!\n# define SUN normalize(vec3(sin(iDate.w*0.5), cos(iTime), HEIGHT_SCALE*1.5))\n// normalize(vec3(3.0, -5.0, 2.0))\n\n// horizontal FOV, if you use negative values the camera will be orthographic!\n// examples:\n// FOV -1.0 for orthographic (sensor size)\n// FOV 90.0 for perspective wide\n// FOV 45.0 for perspective narower\n# define FOV 90.0\n\n// how far \"behind\" the camera is behind the arcball\n# define CAMERA_DIST 0.15\n\n\n// TODO one variable to change between sampled and direct light\n// 0 -> directional light\n// 1 -> point light\n// 2 -> MIS? (one light, one sampled?)\n// 3+ -> bounces//samples?\n# define BOUNCES 4\n# define SAMPLES 8\n\nstruct Material{\n vec3 col; // ground color (or texture?)\n float emissivity; //emitted light in some unit?\n float roughness; // invers reflectivity, sorta\n float translucency; // something like 1.0 for glass and 0.0 for solids? 
-> rays split/sample/refract??\n float IOR; // index of refraction\n};\n\n// edit these here to change the look and feel!\nMaterial chalk = Material(vec3(1.0), 0.0, 0.95, 0.0, 1.3);\nMaterial ground = Material(vec3(0.5), 0.0, 0.95, 0.0, 0.0);\nMaterial sky = Material(vec3(0.02, 0.3, 0.85), 0.2, 0.50, 0.0, 0.0);\nMaterial glass = Material(vec3(1.0), 0.0, 0.02, 1.0, 1.33);\n\n\nstruct Ray{\n vec3 origin;\n vec3 dir;\n vec3 inv_dir; // for speedup?\n};\n\n// helper constructor\nRay newRay(vec3 ro, vec3 rd){\n return Ray(ro, rd, 1.0/rd);\n}\n\n\nstruct IntersectionInfo{\n bool hit;\n // rest illdefined for a miss\n bool inside;\n vec3 entry;\n vec3 exit;\n vec3 entry_norm;\n vec3 exit_norm;\n float entry_dist;\n float exit_dist;\n};\n\n// sorta reference: https://tavianator.com/2022/ray_box_boundary.html\nIntersectionInfo AABB(vec3 center, vec3 size, Ray ray){\n IntersectionInfo res;\n\n vec3 pos = center + size;\n vec3 neg = center - size;\n\n vec3 pos_dist = (pos-ray.origin) * ray.inv_dir;\n vec3 neg_dist = (neg-ray.origin) * ray.inv_dir;\n\n vec3 min_dist = min(pos_dist, neg_dist);\n vec3 max_dist = max(pos_dist, neg_dist);\n\n res.entry_dist = max(max(min_dist.x, min_dist.y), min_dist.z);\n res.exit_dist = min(min(max_dist.x, max_dist.y), max_dist.z);\n\n // normals point away from the center\n res.entry_norm = -sign(ray.dir) * vec3(greaterThanEqual(min_dist, vec3(res.entry_dist)));\n res.exit_norm = sign(ray.dir) * vec3(lessThanEqual(max_dist, vec3(res.exit_dist)));\n\n // essentially methods?\n res.entry = ray.origin + ray.dir*res.entry_dist;\n res.exit = ray.origin + ray.dir*res.exit_dist;\n\n res.hit = res.entry_dist < res.exit_dist && res.exit_dist > 0.0;\n res.inside = res.entry_dist < 0.0; // entry behind us\n\n return res;\n}\n\n// with help from: https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-sphere-intersection.html\nIntersectionInfo Sphere(vec3 center, float radius, Ray ray){\n IntersectionInfo 
res;\n vec3 local = ray.origin - center;\n \n float a = dot(ray.dir, ray.dir);\n float b = 2.0* dot(ray.dir, local);\n float c = dot(local, local) - pow(radius,2.0);\n \n float discriminant = pow(b,2.0) - 4.0*a*c;\n \n res.hit = discriminant >= 0.0;\n \n float t0 = (-b + sqrt(discriminant))/ (2.0*a);\n float t1 = (-b - sqrt(discriminant))/ (2.0*a);\n\n res.entry_dist = min(t0, t1);\n res.exit_dist = max(t0, t1);\n \n if (res.entry_dist < 0.0 && res.exit_dist < 0.0){\n res.hit = false;\n }\n\n res.entry = ray.origin + ray.dir * res.entry_dist;\n res.exit = ray.origin + ray.dir * res.exit_dist;\n\n res.entry_norm = normalize(res.entry - center);\n res.exit_norm = normalize(res.exit - center);\n \n res.inside = res.entry_dist < 0.0 && res.exit_dist > 0.0; // entry behind us\n\n return res;\n}\n\nvec4 sampleHeight(ivec2 cell){\n // to allow for more complex math to determine height\n // .rgb should just return the texture color or some modification of it\n //cell.x = (cell.x + iFrame) % int(iChannelResolution[0].x); // fun texture scroll\n vec4 tex = texelFetch(iChannel0, cell, 0);\n vec4 res;\n res.a = tex.r + tex.g + tex.b; // we do height by a sum of the color for now\n res.a *= 0.33;\n res.rgb = tex.rgb; // simply copy the color as the \"texture\" for now\n \n // res.a = tex.a; // debug/use existing height data.\n res.a *= HEIGHT_SCALE;\n return res;\n}\n\n\n\nIntersectionInfo pillar_hits(ivec2 cell, float height, Ray ray){\n // let's move the pillar into world space by having it's center + extends\n\n vec3 extend = vec3(1.0/vec2(CELLS), abs(height)*0.5);\n vec3 p = vec3(cell.xy, abs(height)*0.5);\n p.xy *= extend.xy;\n p.xy *= 2.0;\n p.xy -= 1.0 - extend.xy; // not quite the offset?\n //extend.z = extend.y; // make them cubes?\n\n // for the case of clouds the box is at the top?\n if (height < 0.0){\n p.z = HEIGHT_SCALE*(1.0-abs(height*0.5));\n }\n\n // TODO: redo this math when less asleep...\n IntersectionInfo res = AABB(p, extend, ray);\n return 
res;\n}\n\n\n\n// from: https://www.shadertoy.com/view/7l3yRn\nvec2 get_random_numbers(inout uvec2 seed) {\n // This is PCG2D: https://jcgt.org/published/0009/03/02/\n seed = 1664525u * seed + 1013904223u;\n seed.x += 1664525u * seed.y;\n seed.y += 1664525u * seed.x;\n seed ^= (seed >> 16u);\n seed.x += 1664525u * seed.y;\n seed.y += 1664525u * seed.x;\n seed ^= (seed >> 16u);\n // Convert to float. The constant here is 2^-32.\n return vec2(seed) * 2.32830643654e-10;\n}\n\n// also from above\n// TODO collaplse into one function!\n// Given uniform random numbers u_0, u_1 in [0,1)^2, this function returns a\n// uniformly distributed point on the unit sphere (i.e. a random direction)\n// (omega)\nvec3 sample_sphere(vec2 random_numbers) {\n float z = 2.0 * random_numbers[1] - 1.0;\n float phi = 2.0 * PI * random_numbers[0];\n float x = cos(phi) * sqrt(1.0 - z * z);\n float y = sin(phi) * sqrt(1.0 - z * z);\n return vec3(x, y, z);\n}\n\n\n// Like sample_sphere() but only samples the hemisphere where the dot product\n// with the given normal (n) is >= 0\nvec3 sample_hemisphere(vec2 random_numbers, vec3 normal) {\n vec3 direction = sample_sphere(random_numbers);\n if (dot(normal, direction) < 0.0)\n direction -= 2.0 * dot(normal, direction) * normal;\n return direction;\n}\n\n\nstruct RaycastInfo{\n bool hit; // if negative, the rest is undefined.\n float dist; // hit_info.entry_dist redundant?\n //ivec2 cell; //current_cell?\n IntersectionInfo hit_info; //has the entry norm etc.\n vec3 col; // TODO: replace with material\n //Ray ray; //just as a reference?\n};\n\n\nRaycastInfo raycast(Ray ray){\n // cast the ray untill there is a hit or we exit the box\n // \"any hit\" shader?\n RaycastInfo result;\n \n IntersectionInfo box = AABB(vec3(0.0, 0.0, HEIGHT_SCALE*0.5), vec3(1.0, 1.0, HEIGHT_SCALE*0.5), ray);\n\n vec3 entry = box.entry;\n\n if (!box.hit){\n // if we \"MISS\" the whole box (not inside?).\n result.hit = false;\n return result;\n \n }\n // everything below here 
is inside the box\n if (box.inside){\n // if we are \"inside\" the entry should just be ro!\n entry = ray.origin; // maybe problems with distance caluclations at the end?\n }\n \n ivec2 current_cell = worldToCell(entry); // TODO: this one is problematic!\n int i;\n ivec2 max_cells = CELLS - min(current_cell, CELLS-current_cell);\n int max_depth = (max_cells.x + max_cells.y)+2; // could also be min!\n for (i = 0; i < max_depth; i++){\n if (current_cell.x < 0 || current_cell.x >= CELLS.x ||\n current_cell.y < 0 || current_cell.y >= CELLS.y){\n // we marched far enough are are \"outside the box\" now!\n result.hit = false; \n return result;\n }\n\n vec4 tex = sampleHeight(current_cell);\n IntersectionInfo pillar = pillar_hits(current_cell, tex.a, ray);\n\n if (pillar.hit) {\n // \"any hit\" (side/top/bot) -> loop ends here\n // do a little bit of light sim by doing diffuse \"block of chalk\"\n vec3 col = tex.rgb;\n // TODO materail decision here?\n result.hit = true;\n result.hit_info = pillar;\n result.dist = pillar.entry_dist;\n result.col = col;\n return result; \n }\n\n // check if our exit distance larger than the box, means we should be at the final pillar...\n if (pillar.exit_dist >= box.exit_dist){\n result.hit = false;\n return result; // do we ever get here?\n }\n\n // the step\n // TODO: DDA style decision\n ivec2 next_cell = current_cell + ivec2(pillar.exit_norm.xy);\n if (next_cell == current_cell){\n // in this case we do another raycast - but without any Z component\n // so the vector is sideways and points to a new cell!\n vec3 flat_rd = vec3(ray.dir.xy, 0.0);\n Ray flat_ray = Ray(ray.origin, flat_rd, 1.0/flat_rd);\n\n IntersectionInfo grid = pillar_hits(current_cell, 1.0, flat_ray);\n next_cell += ivec2(grid.exit_norm.xy); // TODO check if this norm is correct!\n }\n // for next iteration\n current_cell = next_cell;\n }\n \n result.hit = false;\n return result;\n}\n\n// more like a bad shadowmap\n// idea for the future: precompute the horizon per 
pixel: https://youtu.be/LluCbGdi-RM\nfloat directional_light(Ray sun_ray, vec3 normal){\n // return the amount of shadowed?\n // we are now marching upwards from some hit\n // ro is essentially the point we started from\n // rd is the sun angle\n RaycastInfo res = raycast(sun_ray);\n //return res.a;\n \n //TODO: intensity/color?\n \n float amt = 1.0;\n \n \n if (!res.hit){// || (ro + rd*res.a).z >= HEIGHT_SCALE){\n // miss means full sunlight!\n amt = max(0.0, dot(sun_ray.dir, normal));\n }\n else {\n // TODO: use distance?\n amt = 0.1; // additional ambient light from here?\n }\n return amt;\n}\n\n// struct for lights? colored light?\nfloat point_light(vec3 start, vec3 light_pos, float light_intensity, vec3 normal){\n float amount;\n \n vec3 light_dir = normalize(light_pos - start);\n float light_dist = distance(start, light_pos);\n // Ray(hit+0.001*SUN, SUN, 1.0/SUN);\n Ray light_cast = Ray(start + 0.001*light_dir, light_dir, 1.0/light_dir);\n RaycastInfo res = raycast(light_cast);\n \n if (!res.hit || res.dist > light_dist) {\n // either we miss geometry or we hit gometry behind the light\n amount = inversesqrt(light_dist)* light_intensity;\n amount *= max(0.0, dot(normal, light_dir));\n }\n else {\n // hit an intersection before the light, so don't see the light!\n amount = 0.0; \n }\n \n // TODO still needs dot normal!\n return amount;\n}\n\n\n\n// copied from https://www.shadertoy.com/view/M3jGzh\nfloat checkerboard(vec2 check_uv, float cells){\n check_uv *= cells/2.0;\n float rows = float(mod(check_uv.y, 1.0) <= 0.5);\n float cols = float(mod(check_uv.x, 1.0) <= 0.5);\n return float(rows == cols);\n}\n\nstruct HitInfo{\n Material mat;\n float dist;\n vec3 norm;\n vec3 pos;\n bool inside; // for doing glass rays!\n};\n\n\nHitInfo sampleGround(vec3 ro, vec3 rd){\n HitInfo res;\n // TODO: rename to sample skybox maybe? 
as the ground is sorta part of that...\n float ground_height = 0.0;\n float ground_dist = (ground_height-ro.z)/rd.z;\n // TODO: use the actual sphere for the \"skybox\"\n if (ground_dist < 0.0 ||ground_dist > 10.0) {\n // essentially sky hit instead?\n // just some random skybox right now... could be improved of course!\n vec3 col = vec3(0.23, 0.59, 0.92)*exp(dot(SUN, rd)-0.8);\n col = clamp(col, vec3(0.0), vec3(1.0));\n \n res.mat = sky;\n \n res.mat.col = col; // no longer matches with \"sky\" - so gotta change the above maybe?\n \n res.dist = 10.0;\n res.pos = ro + rd*res.dist;\n res.mat.emissivity *= clamp(smoothstep(res.dist - 8.1, res.dist- 3.0, res.pos.z), 0.0, 1.0);\n res.norm = -rd;\n return res; // some random distance that is positive!\n }\n\n vec3 ground_hit = ro + (rd * ground_dist);\n\n float val = checkerboard(ground_hit.xy, 8.0)* 0.25;\n val += 0.45;\n //val *= 2.0 - length(abs(ground_hit));\n\n // fake sun angle spotlight... TODO actual angle and normal calculation!\n //val *= 2.5 - min(2.3, length((-SUN-ground_hit)));//,vec3(0.0,0.0,1.0));\n\n vec3 col = vec3(val);\n res.mat = ground;\n res.mat.col = col;\n res.dist = ground_dist;\n res.pos = ground_hit;\n res.norm = vec3(0.0, 0.0, 1.0);\n return res;\n}\n\n// TODO for montecarlo we need an external loop around this!\nHitInfo scene(Ray camera, vec3 ball_pos, vec3 ball2_pos){\n HitInfo res;\n \n // terrain\n RaycastInfo terrain = raycast(camera);\n\n // ball\n IntersectionInfo ball = Sphere(ball_pos, BALL_SIZE, camera);\n IntersectionInfo ball2 = Sphere(ball2_pos, BALL_SIZE, camera); // TODO: ballsize in .w data point? so they can some how change dynamically?\n\n // five cases: just terrain hit, ball hit, both miss, both hit terrain closer, both hit ball closer\n // idea: get all hits, then calculate closest (sorted?) and then return that. 
if none return background\n // TODO: redo logic (dynamic arrays?)\n \n // front first?\n res.dist = 1000.0;\n \n if (ball.hit && ball.entry_dist < res.dist) {\n // ball infront of the terrain\n res.dist = ball.entry_dist;\n res.mat = glass; // TODO: glass material?\n res.norm = ball.entry_norm;\n res.pos = ball.entry; \n res.inside = ball.inside;\n if (res.inside) {\n res.dist = ball.exit_dist;\n res.norm = ball.exit_norm;\n res.pos = ball.exit;\n } \n } \n if (ball2.hit && ball2.entry_dist < res.dist) {\n // ball infront of the terrain\n res.dist = ball2.entry_dist;\n res.mat = glass; // TODO: glass material?\n res.mat.emissivity = 3.5; // 2nd ball is a light source!\n res.norm = ball2.entry_norm;\n res.pos = ball2.entry; \n res.inside = ball2.inside;\n if (res.inside) {\n res.dist = ball2.exit_dist;\n res.norm = ball2.exit_norm;\n res.pos = ball2.exit;\n }\n }\n if (terrain.hit && terrain.hit_info.entry_dist < res.dist) {\n // terrain infront of the ball\n res.dist = terrain.hit_info.entry_dist;\n res.mat = chalk;\n res.mat.col = terrain.col; // TODO: material construction\n res.norm = terrain.hit_info.entry_norm;\n res.pos = terrain.hit_info.entry;\n res.inside = terrain.hit_info.inside;\n if (res.inside) {\n res.dist = terrain.hit_info.exit_dist;\n res.norm = terrain.hit_info.exit_norm;\n res.pos = terrain.hit_info.exit;\n } \n }\n if (res.dist > 900.0) {\n // miss here, but badly scaled \"skybox\"\n res = sampleGround(camera.origin, camera.dir);\n }\n \n\n return res;\n}\n\n// follow ? https://www.shadertoy.com/view/7l3yRn\nstruct RayRadiance{\n vec3 radiance;\n vec3 throughput_weight;\n};\n\n// reading: https://www.pbr-book.org/4ed/Radiometry,_Spectra,_and_Color/Surface_Reflection\n// further: https://www.pbr-book.org/4ed/Reflection_Models\n// watching maybe: https://youtu.be/wA1KVZ1eOuA\nvec3 brsf(in vec3 rd, in HitInfo hit, inout vec3 next_dir, inout uvec2 seed){\n // returns the outgoing radiance?\n // as well as the next ray direction. 
(inout)\n \n Material mat = hit.mat;\n vec3 norm = hit.norm;\n // naive reflection model\n vec3 perfect_reflection = reflect(rd, norm);\n next_dir = mix(perfect_reflection, next_dir, mat.roughness);\n \n //native transmission model\n\n vec2 randoms = get_random_numbers(seed);\n if (randoms.x < mat.translucency) {\n float IOR = hit.inside ? mat.IOR : 1.0/mat.IOR;\n vec3 reflect_norm = hit.inside ? norm : -norm;\n vec3 perfect_refraction = refract(rd, -reflect_norm, IOR);\n next_dir = mix(perfect_refraction, -next_dir, mat.roughness);\n //next_dir = perfect_refraction;\n norm = reflect_norm;\n }\n\n vec3 outgoing = mat.col * 2.0 * max(0.0, dot(norm, next_dir));\n return outgoing;\n}\n\n\n\n// factored out to function so the seed changes correctly due to inout -.-\nvec3 get_ray_radiance(Ray camera, vec3 ball_pos, vec3 ball2_pos, inout uvec2 seed){\n //after get_ray_radiance in https://www.shadertoy.com/view/7l3yRn\n\n vec3 radiance = vec3(0.0);\n vec3 throughput_weight = vec3(1.0);\n\n int i;\n for(i=0; i<=BOUNCES; i++){\n HitInfo first_hit = scene(camera, ball_pos, ball2_pos);\n radiance += throughput_weight * first_hit.mat.emissivity;\n \n // initialize with random here??\n vec3 next_dir = sample_hemisphere(get_random_numbers(seed), first_hit.norm); \n vec3 outgoing_radiance = brsf(camera.dir, first_hit, next_dir, seed);\n \n throughput_weight *= outgoing_radiance;\n camera = newRay(first_hit.pos+0.0001*next_dir, next_dir);\n }\n\n return radiance;\n}\n\n\n\n// TODO: sample hemisphere function\n// TODO: brdf kinda function where it gives you a new direction based on material.\n// TODO: calucalte the light from that brdf too? HitInfo2 -> RayRadiance, next_dir\n// multiple importance sampling? following: https://lisyarus.github.io/blog/posts/multiple-importance-sampling.html\n// idea being we sample the direct light or direction light once, and then do one random sample. weight them 50/50?\n// TODO: call scene below and loop it?\n// 1. cast scene, 2. 
accumulate light, 3. get next dir, LOOP\n// add a MAX_bounces or SPP var at the top.\n\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // uv normalized to [-1..1] for height with more width\n vec2 uv = (2.0*fragCoord - iResolution.xy)/iResolution.y;\n vec2 mo = (2.0*iMouse.xy - iResolution.xy)/iResolution.y;\n \n \n uvec2 seed = uvec2(fragCoord) ^ uvec2(iFrame << 16);\n vec3 ball_pos = texelFetch(iChannel0, ivec2(0,0), 0).xyz;\n vec3 ball2_pos = texelFetch(iChannel0, ivec2(8,0), 0).xyz; // janky proof of concept for now!\n //fragColor = texture(iChannel0, uv);\n //return;\n\n // for when it's just idling...\n float azimuth = -1.9+iTime*0.1 + mo.x; // keeps a bit of residue of the mouse!\n float altitude = 0.7+cos(1.5+iTime*0.25)*0.35;\n if (sign(iMouse.z) > 0.0){\n // orbiting camera setup\n azimuth = PI*mo.x;\n altitude = 0.5*PI*clamp(mo.y+1.0, -0.01, 0.99); // maybe just positive?\n }\n\n // make sure you don't look \"below\"\n altitude = clamp(altitude, HEIGHT_SCALE*0.2, PI);\n\n // a unit length orbit!\n vec3 camera_pos = vec3(\n cos(azimuth)*cos(altitude),\n sin(azimuth)*cos(altitude),\n sin(altitude));\n // the camera is always looking \"at\" the origin or half way above it\n \n camera_pos = ball_pos + CAMERA_DIST*camera_pos;\n \n vec3 nodal = vec3(0.0, 0.0, HEIGHT_SCALE*0.5);\n vec3 look_at = mix(ball_pos, nodal, 0.05); // so it's not crazy locked on \n \n vec3 look_dir = normalize(look_at-camera_pos);\n\n\n // TODO moving the camera in and out over time??\n //camera_pos += look_dir * CAMERA_DIST; // moving the camera \"back\" to avoid occlusions?\n // two vectors orthogonal to this camera direction (tagents?)\n //vec3 look_u = camera_pos + vec3(-sin(azimuth), cos(azimuth), 0.0);\n //vec3 look_v = camera_pos + vec3(sin(altitude)*-cos(azimuth), sin(altitude)*-sin(azimuth), cos(altitude));\n\n\n // turns out analytically these aren't correct. 
so using cross instead -.-\n vec3 up_vec = vec3(0.0, 0.0, 1.0);\n vec3 look_u = normalize(cross(look_dir, up_vec));\n vec3 look_v = normalize(cross(look_u, look_dir)); // is this faster?\n // camera plane(origin of each pixel) -> barycentric?\n\n vec3 camera_plane;\n vec3 ray_dir;\n vec3 ray_origin;\n\n if (FOV > 0.0){\n // assume a pinhole camera.\n // FOV is the horizontal fov, the given focal length becomes:\n // the 1.0 is the sensor height.\n float focal_length = 1.0/tan(radians(FOV*0.5));\n\n // the ro\n camera_plane = camera_pos - (look_dir*focal_length) + ((look_u*uv.x) + (look_v*uv.y))*-1.0; // inverted here to see upright\n ray_origin = camera_pos;\n\n // the rd\n ray_dir = camera_pos-camera_plane;\n ray_dir = normalize(ray_dir);\n }\n\n else {\n // negative FOV values are interpreted as a sensor size for a orthographic camera!\n // horizontal sensor size, -1 would be something sensible... everything else is far away\n float sensor_size = FOV*0.5*-1.0;\n camera_plane = camera_pos + ((look_u*uv.x)+(look_v*uv.y))*sensor_size; // wider fov = larger \"sensor\"\n ray_dir = look_dir;\n ray_origin = camera_plane;\n }\n\n // todo extract to a function\n // Ray in -> material/normal out?\n // caluclate and aggregate light throughput?\n // new ray direction based on sampled material/refraction?\n\n Ray camera = newRay(ray_origin, ray_dir);\n vec3 out_col = vec3(0.0);\n\n int j;\n for(j=0; j<SAMPLES; ++j){\n vec3 rad = get_ray_radiance(camera, ball_pos, ball2_pos, seed);\n out_col += rad;\n }\n // average color over all samples\n out_col /= float(SAMPLES);\n \n // TODO gamma correction?\n fragColor = vec4(out_col, 1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [
{
"id": 30,
"src": "/media/a/f735bee5b64ef98879dc618b016ecf7939a5756040c2cde21ccb15e69a6e1cfb.png",
"ctype": "texture",
"channel": 1,
"sampler": {
"filter": "nearest",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 257,
"channel": 0
}
],
"code": "//buffer pass does the physics simulation!\n// lots of todo to find here!\n\n\n# define SUBSTEPS 16\n# define GRAVITY -9.8\n# define SIM_SPEED 0.3\n\nvec4 init(vec2 uv){\n //return vec4(uv.x, uv.y, 0.5, 1.0);\n vec4 tex = texture(iChannel1, uv/vec2(CELLS)); \n // TODO add dt here\n return tex;\n}\n\n\n// TODO: can't put in common as the sampler is requied - will break wgpu -.-\nvec4 sampleHeight(vec4 tex, float anim_time, vec2 uv){\n\n // modulate from above: \n tex = 0.5+ sin(tex*6.0 + 0.6*(anim_time)+vec4(uv.x,uv.y,1,0)) * 0.5;\n\n //vec4 tex = texelFetch(iChannel0, cell, 0); // TODO: skip resample\n vec4 res;\n res.a = tex.r + tex.g + tex.b; // we do height by a sum of the color for now\n res.a *= 0.33;\n res.rgb = tex.rgb; // simply copy the color as the \"texture\" for now\n \n // res.a = tex.a; // debug/use existing height data.\n res.a *= HEIGHT_SCALE;\n return res;\n}\n\n\n// TODO box - sphere intersection?\n// sph, box -> error/direction/scale\n// sum it all up, recaclucalte velocity? (Position based dynamics?)\n\n\n// TODO: ball box intersection\n// TODO ball, ball intersection? 
(how to cross write?)\n\n\n\n// TODO: fix the bounces and rewatch https://youtu.be/j84zJ06wnVA\nvoid animate(float dt, int sub_step, in vec2 uv, in vec3 pos, in vec3 vel, out vec3 new_pos, out vec3 new_vel){ \n new_vel = vel;\n new_vel.z += GRAVITY *dt;\n new_pos = pos + new_vel *dt;\n \n // basic bouce on the ground:\n // TODO check the one pillar on the contact point my mapping uv back to texture I guess.\n float anim_time = iTime + float(sub_step) * dt; \n vec4 tex = init(new_pos.xy);\n float height = sampleHeight(tex, dt, uv).a;\n if ((new_pos.z - RADIUS) < height) {\n // TODO restitution\n new_pos.z = pos.z; // TODO don't stop here\n new_vel.z *= -1.0;\n }\n \n // sides at 1.0 and -1.0 in all directions\n // TODO bvec here?\n // probably scale by the overshoot...\n if ((abs(new_pos.x) + RADIUS) > 1.0) {\n float error = (abs(new_pos.x) + RADIUS) - 1.0;\n new_vel.x *= -1.0;\n \n new_pos.x += new_vel.x*error; \n }\n if ((abs(new_pos.y) + RADIUS) > 1.0) {\n new_pos.y = pos.y;\n new_vel.y *= -1.0;\n }\n \n // TODO check all pillars below the ball!\n // TODO sidewalls bounce a pillar\n // how to do a sideways bounce?\n}\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // todo not to the limited 64x64 scale of the world -.-\n vec2 uv = (2.0*fragCoord - iResolution.xy)/iResolution.y;\n \n if (fragCoord.x > 65.0 || fragCoord.y > 65.0){\n discard;\n }\n \n ivec2 st = ivec2(fragCoord);\n // since we need to store at least 6 values, we mask them with bools here\n // TODO maybe macros?\n \n // store a ball per row? // TODO -> NUM_BALLS common define?\n int BALL_ID = int(fragCoord.x); \n ivec2 POS_MEM = ivec2(BALL_ID,0);\n ivec2 VEL_MEM = ivec2(BALL_ID,1);\n bool STORE_POS = (st == POS_MEM) ? true : false;\n bool STORE_VEL = (st == VEL_MEM) ? 
true : false;\n \n vec3 pos = texelFetch(iChannel0, POS_MEM, 0).xyz;\n vec3 vel = texelFetch(iChannel0, VEL_MEM, 0).xyz; \n \n vec4 col = vec4(0.0);\n vec4 tex; // values at the \"start\" of this timestep?\n if (iFrame < 2){\n col = init(vec2(st)/64.0);\n pos = vec3(0.0, 0.0, 0.75);\n vel = vec3(uv.x, 2.5, 0.1);\n }\n // if (!STORE_POS || !STORE_VEL)\n else{\n tex = init(vec2(st));\n col = sampleHeight(tex, iTime, uv);\n //discard; \n }\n \n vec3 new_pos = pos;\n vec3 new_vel = vel;\n \n float dt = (iTimeDelta/float(SUBSTEPS))*SIM_SPEED;\n // TODO extract to function\n int i;\n for (i=0; i < SUBSTEPS; i++){\n animate(dt, i, uv, pos, vel, new_pos, new_vel);\n pos = new_pos;\n vel = new_vel;\n } \n \n // TODO maybe \n // write only the relavent part?\n col.rgb = STORE_POS ? new_pos : col.rgb;\n col.rgb = STORE_VEL ? new_vel : col.rgb;\n fragColor = col;\n}",
"name": "Buffer A",
"description": "",
"type": "buffer"
},
{
"inputs": [],
"outputs": [],
"code": "// a few shared things\n# define RADIUS 0.035\n# define BALL_SIZE RADIUS\n\n\n# define CELLS ivec2(64)\n# define HEIGHT_SCALE 0.2\n\n\nivec2 worldToCell(vec3 p) {\n // move world space again\n p += 1.0;\n p *= 0.5;\n ivec2 st = ivec2((p.xy*vec2(CELLS.xy)));\n // TODO: find an actual solution to the edge cases!\n st = min(st, CELLS -1);\n return st;\n}\n",
"name": "Common",
"description": "",
"type": "common"
}
]
},
{
"ver": "0.1",
"info": {
"id": "wXVSR1",
"date": "1754338508",
"viewed": 84,
"name": "Double Pendulum Datacube",
"username": "jakel101",
"description": "this idea came when I watched https://youtu.be/9gQQAO4I1Ck\nthe screenspace essentially picks different initial conditions, keeping track of position and momentum for the inner and outer pendulum. use mouse to inspect a specific pendulum!\n",
"likes": 4,
"published": 3,
"flags": 32,
"usePreview": 0,
"tags": [
"simulation",
"physics",
"doublependulum",
"datacube"
],
"hasliked": 0,
"retrieved": "2025-09-01T22:40:36.098288+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 258,
"src": "/media/previz/buffer01.png",
"ctype": "buffer",
"channel": 1,
"sampler": {
"filter": "nearest",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents | ($)~~o~~($) |\n// imporvements and remixes welcome!\n\n// image pass lets you see the datacube and then pick any parallel timeline\n// use the mouse and click to view a specific timeline (whole screen)\n\n// previously derived in https://www.shadertoy.com/view/wc33WX\nfloat sdLineSegment(vec2 p, vec2 a, vec2 b) {\n float d; \n float h = clamp(dot(p-a, b-a)/(length(b-a)*length(b-a)), 0.0, 1.0);\n vec2 q = mix(a, b, h); \n d = length(p-q);\n return d;\n}\n\nfloat sdBall(vec2 pos, float rad){\n return length(pos) - rad;\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // Normalized pixel coordinates (from -1 to 1)\n vec2 uv = fragCoord/iResolution.xy;\n vec2 cube_st = uv; // sampling coords we need for the whole screen\n uv *= 2.0;\n vec2 cube_uv = uv;\n uv -= 1.0;\n vec2 m = iMouse.xy/iResolution.xy;\n m *= 2.0;\n m -= 1.0;\n\n ivec2 st = ivec2(iMouse.xy); //TODO remap to lineup with the front of the cube!\n vec4 state = texelFetch(iChannel0, st, 0);\n \n // sanity checks!\n // m = Cartesian2Polar(m); // Polar2Cartesian\n // m = Polar2Cartesian(m.x, m.y); // Polar2Cartesian\n \n // sorta the background?\n vec4 cube = texture(iChannel1, cube_st);\n vec4 full = texture(iChannel0, cube_st); // this one for fullscreen! \n \n vec3 col = mix(full, cube, clamp(-cos(iTime*0.3)*5.0, 0.0, 1.0)).rgb; // since .a channel also has information it might be worth looking at that too!\n \n // TODO these scale is not the same as used for the simulation, but should be proportional\n vec2 inner_pos = Polar2Cartesian(state.x, 0.45);\n vec2 outer_pos = inner_pos - Polar2Cartesian(state.z, -0.45); // why minus here?\n \n // TODO make pixel width analytically correct!\n float pixel_width = 0.002;\n \n // TODO maybe make a draw func? 
void( inout bg, in fg, in mask)\n float selector_dist = sdBall(uv - m, 0.02); // TODO what happens outside the area?\n col = mix(col, vec3(0.8, 0.8, 0.4), smoothstep(pixel_width, -pixel_width, selector_dist));\n float innter_rod = sdLineSegment(uv, vec2(0.0, 0.0), inner_pos);\n col = mix(col, vec3(0.4, 0.4, 0.1), smoothstep(pixel_width, -pixel_width, innter_rod-0.01));\n float outer_rod = sdLineSegment(uv, inner_pos, outer_pos);\n col = mix(col, vec3(0.6, 0.6, 0.1), smoothstep(pixel_width, -pixel_width, outer_rod-0.01));\n float pendulum = sdBall(uv - outer_pos, 0.02);\n col = mix(col, vec3(1.0, 1.0, 0.1), smoothstep(pixel_width, -pixel_width, pendulum));\n // Output to screen\n \n \n float angle = atan(uv.x, uv.y);\n float dist = length(uv)*2.0;\n //col = vec3((angle/(PI*2.0))+ 0.5, 0.0, 0.0);\n \n fragColor = vec4(col, 1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 257,
"channel": 0
}
],
"code": "// this buffer does the physics simulation\n// outputs are as follows\n// .x position of the inner pendulum\n// .y rotational speed of the inner pendulum\n// .z position of the outer pendulum (in half rotations from the bottom) left/clockwise is negative\n// .w rotational speed of the outer pendulum clockwise is negative\n\n// substeps still scale the same time, but should be more \"accurate\" but results in why more noise too\n// energy is kept longer, breaks on really high numbers like > 110 or something :/\n#define SUBSTEPS 32\n// this can be changed to make it go \"faster\"\n#define GRAVITY -9.8\n\nvec4 init(vec2 uv) {\n // this let's you set the initial conditions in relation to screenspace, so play around here and have fun\n // requiers you to rewind time!\n vec4 vals;\n \n vals.xz = uv; // rotation position\n vals.yw = vec2(0.0); // start with 0 momentum!\n //vals.xz = vec2(1.0); // start at the top\n \n // vals.x = 0.0; //for testing the outer part is just hanging\n vals.yw = uv.yx*6.1; // add a lot of momentum!\n \n \n //vals.wy = uv*0.01; // really close initial conditions (start athe the top)\n \n vals.y *= -1.0; // little flip so the default mouse position looks more interesting!\n return vals;\n}\n\n// not used, left over from development!\nvoid simulate_single(inout vec2 pos, inout vec2 vel, float dt){\n // assume the mass is 1!\n vel.y += GRAVITY *dt;\n \n vec2 new_pos = pos + vel * dt;\n // constraints\n \n // assume length is 1 and fixed to the origin\n float error = 1.0 - length(new_pos);\n vec2 corr = new_pos * -1.0 * error;\n\n new_pos -= corr;\n\n vec2 next_vel = (new_pos - pos) / dt;\n\n // outputs\n pos = new_pos;\n vel = next_vel;\n}\n\n\nvoid simulate_double(float dt, inout vec2 pos_i, inout vec2 vel_i, inout vec2 pos_o, inout vec2 vel_o){\n // reference: https://github.com/matthias-research/pages/blob/master/tenMinutePhysics/06-pendulumShort.html MIT licnesed\n vec2 new_pos_i;\n vec2 new_vel_i;\n vec2 new_pos_o;\n vec2 
new_vel_o;\n \n // step 1 apply gravity\n vel_i.y += GRAVITY *dt;\n vel_o.y += GRAVITY *dt;\n new_pos_i = pos_i + vel_i * dt;\n new_pos_o = pos_o + vel_o * dt;\n \n // step 2 constraints:\n // assume masses and lengths is 1\n // inner is fixed to 0,0 and outer is attached to inner \n vec2 delta = new_pos_o - new_pos_i;\n float error_o = 1.0 - length(delta);\n vec2 corr_i = error_o * -0.5 * delta; // 0.5 is the inverse mass\n vec2 corr_o = error_o * -0.5 * delta; \n new_pos_i += corr_i; // one adds and one subtracts.. to move them towards their targets\n new_pos_o -= corr_o;\n \n // TODO: is this order correct?\n float error_i = 1.0 - length(new_pos_i);\n corr_i = new_pos_i * -1.0 * error_i;\n new_pos_i -= corr_i;\n \n \n // step3 derive new velocities\n new_vel_i = (new_pos_i - pos_i) /dt;\n new_vel_o = (new_pos_o - pos_o) /dt;\n \n \n //return not needed but we use inouts\n pos_i = new_pos_i;\n vel_i = new_vel_i;\n pos_o = new_pos_o;\n vel_o = new_vel_o;\n}\n\n// lets first try a single pendulumn!\nvec2 single(float a, float v){\n // reference: https://youtu.be/XPZEeS70zzU\n\n // returns them as polar again?\n vec2 new;\n vec2 pos = Polar2Cartesian(a, 1.0);\n vec2 orth = Polar2Cartesian(a + 0.5, 1.0); // just an orthogonal vector we then scale\n vec2 vel = orth*v;\n \n int i;\n float dt = (iTimeDelta/float(SUBSTEPS));\n for (i=0; i<SUBSTEPS; i++){\n simulate_single(pos, vel, dt);\n } \n vec2 new_pos = pos;\n vec2 new_vel = vel;\n \n // reproject to polar coordinates: \n new.x = Cartesian2Polar(new_pos).x;\n \n vec2 next_orth = Polar2Cartesian(new.x + 0.5, 1.0);\n new.y = length(new_vel)*sign(dot(new_vel,next_orth));\n new.y = dot(new_vel, next_orth);\n return new;\n}\n\nvec4 double_pendulum(vec4 state){\n // decode positions and velocities\n float lenght_i = 1.0;\n float length_o = 1.0;\n //TODO where do we put the lengths?\n float angle_i = state.x;\n float momentum_i = state.y; \n vec2 pos_i = Polar2Cartesian(angle_i, lenght_i);\n vec2 vel_i = 
Polar2Cartesian(angle_i+0.5, 1.0)*momentum_i;\n \n float angle_o = state.z;\n float momentum_o = state.w;\n vec2 pos_o = Polar2Cartesian(angle_o, length_o);\n pos_o = pos_i + pos_o; // because the polar coordinates were centered.\n vec2 vel_o = Polar2Cartesian(angle_o+0.5, 1.0)*momentum_o;\n vel_o = vel_i + vel_o;\n \n int i;\n float dt = (iTimeDelta/float(SUBSTEPS));\n for (i=0; i<SUBSTEPS; i++){\n // sanity check here\n //simulate_single(pos_i, vel_i, dt);\n //simulate_single(pos_o, vel_o, dt);\n simulate_double(dt, pos_i, vel_i, pos_o, vel_o);\n }\n \n // reproject to polar coordinates:\n vec2 next_i;\n next_i.x = Cartesian2Polar(pos_i).x;\n vec2 orth_i = Polar2Cartesian(next_i.x+0.5, 1.0);\n next_i.y = length(vel_i)*sign(dot(vel_i,orth_i));\n next_i.y = dot(vel_i, orth_i);\n \n vec2 next_o;\n vec2 pos_o_rel = pos_o - pos_i; // we only store the relative position and motion of the outer pendulum\n next_o.x = Cartesian2Polar(pos_o_rel).x;\n vec2 orth_o = Polar2Cartesian(next_o.x+0.5, 1.0);\n vec2 vel_o_rel = vel_o - vel_i;\n next_o.y = length(vel_o_rel)*sign(dot(vel_o_rel,orth_o));\n next_o.y = dot(vel_o_rel, orth_o);\n \n // fix the one element for testing\n //next_i = vec2(0.0, 0.0);\n //next_o = vec2(0.0, 0.0);\n vec4 new_state = vec4(next_i, next_o);\n return new_state;\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // Normalized pixel coordinates (from -1..1)\n vec2 uv = fragCoord/iResolution.xy;\n uv *= 2.0;\n uv -= 1.0;\n \n ivec2 st = ivec2(fragCoord);\n vec4 prev = texelFetch(iChannel0, st, 0);\n if (iFrame < 2) { // maybe a fix for resizing here too?\n prev = init(uv); \n }\n //vec4 next = single(prev.x, prev.y).xyxy;\n vec4 next = double_pendulum(prev);\n //next.zw = vec2(0.0,0.0);\n //next = simulate(prev.xy, prev.zw, iTimeDelta);\n \n \n vec4 col = vec4(next);\n \n fragColor = vec4(col);\n}",
"name": "Buffer A",
"description": "",
"type": "buffer"
},
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 258,
"src": "/media/previz/buffer01.png",
"ctype": "buffer",
"channel": 1,
"sampler": {
"filter": "nearest",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 258,
"channel": 0
}
],
"code": "// this buffer basically just shows a fake 3D datacube\n// by shifting the previous frame over and then reading the next state!\n\n// TODO: maybe do this in pixels instead?\n#define SHIFT vec2(0.005, 0.002)\n\nvec4 init(vec2 uv){\n\n return vec4(0.0);\n}\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord ){\n // Normalized pixel coordinates (from 0 to 1)\n vec2 uv = fragCoord/iResolution.xy;\n \n vec2 new_st = (uv*2.0) -1.0;\n float new_mask = float(new_st.x > 0.0 || new_st.y > 0.0);\n \n vec2 prev_st = uv - SHIFT; \n vec4 prev = texture(iChannel1, prev_st); \n \n if (prev_st.x < 0.0 || prev.st.y < 0.0) {\n prev = vec4(0.0); // avoid the wrapping to the left\n }\n if (prev_st.x < 0.5 && prev_st.y < 0.5){\n // sorta a fake shadow?\n prev *= clamp(length(max(abs(uv.x-0.25), abs(uv.y-0.25)))+0.5, 0.0, 1.0);\n }\n prev *= 0.99; // darker everything too\n vec4 next = texture(iChannel0, new_st);\n // TODO abs or shift or something to get all values in the visible range!\n // for display we want to see it\n // next = abs(next); // too much symmetry\n // next += 0.5; // no black?\n next = clamp(next, vec4(0.0), vec4(1.0)); // maybe scale instead with some exp function or similar\n \n // TODO: make black transparent here?\n vec4 col = mix(next, prev, new_mask);\n\n \n // Output to screen\n fragColor = vec4(col);\n}",
"name": "Buffer B",
"description": "",
"type": "buffer"
},
{
"inputs": [],
"outputs": [],
"code": "// functions I need for the simulation and the vizualization - they get to be shared here!\n\n# define PI 3.141592\n\n\n// zero seems to be up and 1 is donw -.-\nvec2 Polar2Cartesian(float rot, float dist){\n rot -= 0.5; // is this even correct anymore?\n rot *= PI; \n vec2 res = vec2(cos(rot)*dist, sin(rot)*dist); \n return res;\n}\n\nvec2 Cartesian2Polar(vec2 pos){\n float rot, dist;\n dist = length(pos);\n rot = atan(pos.y, pos.x);\n rot /= PI;\n rot += 0.5;\n return vec2(rot, dist);\n}\n",
"name": "Common",
"description": "",
"type": "common"
}
]
},
{
"ver": "0.1",
"info": {
"id": "wXKSR1",
"date": "1754246850",
"viewed": 62,
"name": "texture bitplanes",
"username": "jakel101",
"description": "https://en.wikipedia.org/wiki/Bit_plane somehow came up on Discord... here we show the reconstructed uint8 bitplanes but split into RGBA channels.\n\nbottom left is Red, bottom right is Green, top left is Blue and top right is Alpha!",
"likes": 1,
"published": 3,
"flags": 0,
"usePreview": 0,
"tags": [
"bitplane"
],
"hasliked": 0,
"retrieved": "2025-08-27T15:48:44.247118+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 5,
"src": "/media/a/8de3a3924cb95bd0e95a443fff0326c869f9d4979cd1d5b6e94e2a01f5be53e9.jpg",
"ctype": "texture",
"channel": 0,
"sampler": {
"filter": "mipmap",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "void mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // Normalized pixel coordinates (from 0 to 2)\n vec2 uv = fragCoord/iResolution.xy;\n uv*= 2.0;\n int channel = int(uv.x)*2 + int(uv.y);\n uv = fract(uv); \n vec4 tex = texture(iChannel0, uv);\n // wrange the texture back int uint8 representation\n int val = int(tex[channel]*255.0);\n \n int bit = (iFrame/30)%8; // lower this 30 to make your eyes hurt!\n val = (val >> bit) & 1;\n \n // as val is either 0 or 1 we don't need to scale up again to 255\n vec4 col = vec4(val);\n fragColor = vec4(col);\n}",
"name": "Image",
"description": "",
"type": "image"
}
]
},
{
"ver": "0.1",
"info": {
"id": "t3tXz8",
"date": "1753142287",
"viewed": 169,
"name": "Dirt Terrain of Pillars",
"username": "jakel101",
"description": "this is meant to be a submission for https://itch.io/jam/acerola-dirt-jam\nnot VOXELS, height is not quantized!\ntracked changes on [url=https://github.com/Vipitis/shader_tracker/tree/main/jakel101/t3tXz8_Dirt_Terrain_of_Pillars]GitHub[/url]",
"likes": 13,
"published": 3,
"flags": 48,
"usePreview": 0,
"tags": [
"terrain",
"pathtracing",
"dirtjam"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents /^\u00a7^\\\n\n// Image pass implemented as my heightmap pathtracing project: https://www.shadertoy.com/view/M3VBWt\n// with a couple tweaks to make it work for this example :)\n\n// SOME BUGS: (or todos)\n// clouds have a strong moire pattern because traversal abbrpuptly begins at the top plane\n// some water seems to get green side texture?\n\n# define PI 3.141592653\n// tweaked with 0.5 in mind others could look wonky...\n# define HEIGHT_SCALE 0.55\n\n// this is square but still depends on the canvas resolution!\n# define CELLS ivec2(min(512.0,min(iChannelResolution[0].x, iChannelResolution[0].y)))\n\n// unsure yet where to bring this!\n# define SUN normalize(vec3(sin(iDate.w*0.05), cos(iTime*0.2), HEIGHT_SCALE*1.1))\n// normalize(vec3(3.0, -5.0, 2.0))\n\n// playing with this, using my imgui parser - https://github.com/pygfx/shadertoy/pull/46\n# define CLOUD_DENSITY 20.0\n\n\n// horizontal FOV, if you use negative values the camera will be orthographic!\n// examples:\n// FOV -1.0 for orthographic (sensor size)\n// FOV 90.0 for perspective wide\n// FOV 45.0 for perspective narower\n# define FOV 90.0\n\nivec2 worldToCell(vec3 p) {\n \n // move world space again\n p += 1.0;\n p *= 0.5;\n ivec2 st = ivec2((p.xy*vec2(CELLS.xy)));\n // TODO: find an actual solution to the edge cases!\n st = min(st, CELLS -1);\n return st;\n}\nstruct Ray{\n vec3 origin;\n vec3 dir;\n vec3 inv_dir; // for speedup?\n};\n\nstruct BoxHit{ \n bool hit;\n // rest illdefined for a miss\n bool inside;\n vec3 entry;\n vec3 exit;\n vec3 entry_norm;\n vec3 exit_norm;\n float entry_dist;\n float exit_dist;\n};\n\n// sorta reference: https://tavianator.com/2022/ray_box_boundary.html\n// TODO should be a HitInfo as the same will work for other intersections down the line.\nBoxHit AABB(vec3 center, vec3 size, Ray ray){\n BoxHit res;\n \n vec3 pos = center + size;\n vec3 neg = center - size;\n \n vec3 pos_dist = (pos-ray.origin) * ray.inv_dir;\n vec3 neg_dist = 
(neg-ray.origin) * ray.inv_dir;\n \n vec3 min_dist = min(pos_dist, neg_dist);\n vec3 max_dist = max(pos_dist, neg_dist);\n \n res.entry_dist = max(max(min_dist.x, min_dist.y), min_dist.z);\n res.exit_dist = min(min(max_dist.x, max_dist.y), max_dist.z);\n \n // essentially methods?\n res.hit = res.entry_dist < res.exit_dist && res.exit_dist > 0.0;\n res.inside = res.entry_dist < 0.0; // entry behind us\n \n res.entry = ray.origin + ray.dir*res.entry_dist;\n res.exit = ray.origin + ray.dir*res.exit_dist;\n \n // normals point away from the center\n res.entry_norm = -sign(ray.dir) * vec3(greaterThanEqual(min_dist, vec3(res.entry_dist)));\n res.exit_norm = sign(ray.dir) * vec3(lessThanEqual(max_dist, vec3(res.exit_dist)));\n \n return res;\n}\n\n\nBoxHit pillar_hits(ivec2 cell, float height, Ray ray){ \n // let's move the pillar into world space by having it's center + extends\n \n vec3 extend = vec3(1.0/vec2(CELLS), abs(height)*0.5);\n vec3 p = vec3(cell.xy, abs(height)*0.5); \n p.xy *= extend.xy; \n p.xy *= 2.0;\n p.xy -= 1.0 - extend.xy; // not quite the offset?\n //extend.z = extend.y; // make them cubes?\n \n // for the case of clouds the box is at the top?\n if (height < 0.0){\n p.z = HEIGHT_SCALE*(1.0-abs(height*0.5));\n } \n \n // TODO: redo this math when less asleep...\n BoxHit res = AABB(p, extend, ray);\n return res;\n}\n\n\nfloat transmittance(float dist){\n // ref video https://youtu.be/Qj_tK_mdRcA\n \n // bad approximation of \"beers law\"? (macro for absorption)\n float trans = exp(-dist*CLOUD_DENSITY);\n \n trans = clamp(trans, 0.0, 1.0);\n // this is meant as a transmittance: 1.0 means we see through the cloud and 0.0 means we don't.\n return trans;\n}\n\n\n\nvec3 terrain_palette(float h){\n // return a specific color based on height. 
\n // I manaually draw the RGB curves in a curve editor tool I have for thermal imaging\n // then crafted functions in graphtoy to minic their paths and put it here\n \n // offsets\n float h_r = h - 0.52;\n float h_g = h - 0.4;\n float h_b = h - 0.15;\n // cubic polynomials\n float r = (6.0*pow(h_r,3.0) + 0.1*pow(h_r,2.0) + 0.0*h_r +0.3);\n float g = (6.0*pow(h_g,3.0) + 0.1*pow(h_g,2.0) + -1.0*h_g +0.3);\n float b = (4.0*pow(h_b,3.0) + 0.1*pow(h_b,2.0) + -2.0*h_b +0.3);\n \n //vec3(0.267, 0.133, 0.001); // ~#442200\n vec3 col = vec3(r,g,b);\n col = clamp(col, vec3(0.0), vec3(1.0)); // ensure no negative or overbright colors!\n return col;\n}\n\nvec4 sampleHeight(ivec2 cell){\n // to allow for more complex math to determine height\n // .rgb should just return the texture color or some modification of it\n //cell.x = (cell.x + iFrame) % int(iChannelResolution[0].x); // fun texture scroll\n vec4 tex = texelFetch(iChannel0, cell, 0);\n vec4 res;\n res.a = tex.r; // our height data is in this channel\n res.rgb = terrain_palette(res.a*1.5-0.2); // move it a round a bit so the pallete looks okay...\n \n // could also just be a constant here!\n if (tex.b > 0.0){\n // cheap solid water in amount of water per pillar...\n // TODO semi transparen/reflective water?\n // the simulation is in the Buffer pass, we just reconstruct the height for rendering here\n res.a += tex.b;\n res.rgb = mix(vec3(0.2, 0.5, 0.8), vec3(0.1, 0.1, 0.9), tex.b*20.0); // little color for water \"depth\"\n }\n res.a *= HEIGHT_SCALE;\n return res;\n}\n// this could be joined into the function above.\nfloat sampleClouds(ivec2 cell){\n // idea is to read the texture data in a specific channel for cloud height/density?\n // this needs to be implemented in my function down below as an alternative hit.\n vec4 tex = texelFetch(iChannel0, cell, 0); \n float res = tex.g; // this channel has \"cloud\" terrain\n // maybe we clamp it or something to have no clouds?\n res -= 0.55; // negative values become clouds 
that show up.\n return res; \n}\n\nvec4 raycast(Ray ray, inout vec2 clouds){\n // cast the ray untill there is a hit or we exit the box\n // \"any hit\" shader?\n // returns tex + dist, negative dist means a \"miss\"\n // the inout for clouds sums up it's distance and depth of clouds.\n BoxHit box = AABB(vec3(0.0, 0.0, HEIGHT_SCALE*0.5), vec3(1.0, 1.0, HEIGHT_SCALE*0.5), ray);\n clouds = vec2(0.0, 0.0);\n \n vec3 entry = box.entry;\n \n if (!box.hit){\n // if we \"MISS\" the whole box (not inside?).\n \n return vec4(vec3(0.2, 0.8, 0.0), -abs(box.exit_dist));\n }\n // everything below here is inside the box\n if (box.inside){ \n // if we are \"inside\" the entry should just be ro!\n entry = ray.origin; // maybe problems with distance caluclations at the end?\n }\n \n //return vec4(vec3(0.6), 1.0);\n \n //return entry.rgbb;\n \n ivec2 current_cell = worldToCell(entry); // TODO: this one is problematic!\n int i;\n ivec2 max_cells = CELLS - min(current_cell, CELLS-current_cell);\n int max_depth = (max_cells.x + max_cells.y)+2; // could also be min!\n for (i = 0; i < max_depth; i++){ \n if (current_cell.x < 0 || current_cell.x >= CELLS.x ||\n current_cell.y < 0 || current_cell.y >= CELLS.y){\n // we marched far enough are are \"outside the box\" now!\n return vec4(vec3(0.4), -abs(box.exit_dist));\n } \n // so let's look for clouds first!\n float cloud_depth = sampleClouds(current_cell); \n if (cloud_depth < 0.){ // cand adjust how \"many\" clouds here!\n // only if there is a cloud we even consider this\n BoxHit cloud = pillar_hits(current_cell, (cloud_depth*0.2), ray);\n if (cloud.hit){ \n // for transmittance we accumulate the distance\n clouds.x += (distance(cloud.entry,cloud.exit)); // +(cloud_depth*0.001)); // add some random variation?\n \n // the \"color\" of clouds is based on the depth, scaled by the already accumulated transmittance\n // sorta the distance to the sun, exit because entry can be at the top\n // and how much of an angle the sun was at...\n // the 
idea is to sum up how much \"light\" this has accumulated.\n float hit_depth = (HEIGHT_SCALE-cloud.exit.z);\n float sun_angle = clamp(dot(SUN, vec3(0.0,0.0,1.0)), 0.0, 1.0);\n clouds.y += abs(cloud_depth*transmittance(clouds.x)*sun_angle*transmittance(hit_depth));\n \n // TODO use cloud.inside to increase the scale? (try to remove the moire pattern)\n //return vec4(vec3((1.0 - cloud_depth)), 0.0*abs(cloud_hit.x));\n }\n }\n \n vec4 tex = sampleHeight(current_cell);\n BoxHit pillar = pillar_hits(current_cell, tex.a, ray);\n \n if (pillar.hit) {\n // \"any hit\" (side/top/bot) -> loop ends here \n // do a little bit of light sim by doing diffuse \"block of chalk\"\n vec3 col = tex.rgb;\n // half the phong diffuse\n // TODO: assume some base \"emissive\" quality to all pillars (or scaled with some value?)\n // needs better hit model and shader to accumulate over a few traces.\n // TODO: should one of them be negative?\n col *= (2.0*dot(pillar.entry_norm, SUN)) + 0.2; // \"ambient\"/emission term\n \n return vec4(col, abs(pillar.entry_dist));\n }\n \n \n if (pillar.exit_dist >= box.exit_dist){\n return vec4(vec3(0.8), -abs(pillar.exit_dist));\n }\n \n // the step\n ivec2 next_cell = current_cell + ivec2(pillar.exit_norm.xy);\n if (next_cell == current_cell){\n // in this case we do another raycast - but without any Z component\n // so the vector is sideways and points to a new cell!\n vec3 flat_rd = vec3(ray.dir.xy, 0.0);\n Ray flat_ray = Ray(ray.origin, flat_rd, 1.0/flat_rd);\n \n BoxHit grid = pillar_hits(current_cell, 1.0, flat_ray);\n next_cell += ivec2(grid.exit_norm.xy); // TODO check if this norm is correct!\n }\n // for next iteration\n current_cell = next_cell;\n }\n //return vec4(vec2(current_cell)/vec2(CELLS), 0.0, 0.0);\n // defualt \"miss\"? 
-> like we exit the box?\n \n return vec4(vec3(1,0,0), -abs(box.exit_dist));\n\n}\n\n// more like a bad shadowmap\n// idea for the future: precompute the horizon per pixel: https://youtu.be/LluCbGdi-RM\nfloat shadow(Ray sun_ray){\n // return the amount of shadowed?\n // we are now marching upwards from some hit\n // ro is essentially the point we started from\n // rd is the sun angle\n vec2 cloud_values;\n vec4 res = raycast(sun_ray, cloud_values);\n //return res.a;\n if (res.a < 0.0){// || (ro + rd*res.a).z >= HEIGHT_SCALE){\n // likely means outside the box/ground!\n // so think like \"skylight\" \n float cloud_transmittance = transmittance(cloud_values.x);\n // full sunlight \n return cloud_transmittance;\n } \n else {\n return 0.0;\n }\n}\n\nfloat checkerboard(vec2 check_uv, float cells){\n check_uv *= cells/2.0;\n float rows = float(mod(check_uv.y, 1.0) <= 0.5);\n float cols = float(mod(check_uv.x, 1.0) <= 0.5);\n return float(rows == cols);\n}\n\n\nvec4 sampleGround(vec3 ro, vec3 rd){\n // for any ray that misses the heightmap\n // TODO: rename to sample skybox maybe? as the ground is sorta part of that...\n float ground_height = 0.0;\n float ground_dist = (ground_height-ro.z)/rd.z;\n if (ground_dist < 0.0) {\n // essentially sky hit instead?\n // just some random skybox right now... could be improved of course!\n vec3 col = vec3(0.23, 0.59, 0.92)*exp(dot(SUN, rd)-0.8);\n col = clamp(col, vec3(0.0), vec3(1.0));\n return vec4(col, 30.0); // some random distance that is positive!\n }\n \n vec3 ground_hit = ro + (rd * ground_dist);\n \n float val = checkerboard(ground_hit.xy, 8.0)* 0.1;\n val += 0.45;\n //val *= 2.0 - length(abs(ground_hit));\n \n // fake sun angle spotlight... 
TODO actual angle and normal calculation!\n val *= 2.5 - min(2.3, length((-SUN-ground_hit)));//,vec3(0.0,0.0,1.0));\n \n vec3 col = vec3(val);\n return vec4(col, ground_dist);\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // uv normalized to [-1..1] for height with more width\n vec2 uv = (2.0*fragCoord - iResolution.xy)/iResolution.y;\n vec2 mo = (2.0*iMouse.xy - iResolution.xy)/iResolution.y;\n \n //fragColor = texture(iChannel0, uv);\n //return;\n \n // for when it's just idling... \n float azimuth = -1.9+iTime*0.1 + mo.x; // keeps a bit of residue of the mouse!\n float altitude = 0.7+cos(1.5+iTime*0.25)*0.35; \n if (sign(iMouse.z) > 0.0){\n // orbiting camera setup\n azimuth = PI*mo.x;\n altitude = 0.5*PI*clamp(mo.y+1.0, -0.01, 0.99); // maybe just positive?\n }\n \n // make sure you don't look \"below\"\n altitude = clamp(altitude, HEIGHT_SCALE*0.2, PI);\n \n // a unit length orbit!\n vec3 camera_pos = vec3(\n cos(azimuth)*cos(altitude),\n sin(azimuth)*cos(altitude),\n sin(altitude)); \n // the camera is always looking \"at\" the origin or half way above it\n vec3 look_dir = normalize(vec3(0.0, 0.0, HEIGHT_SCALE*0.5) - camera_pos);\n \n \n // TODO moving the camera in and out over time??\n camera_pos += look_dir * -0.1; // moving the camera \"back\" to avoid occlusions?\n // two vectors orthogonal to this camera direction (tagents?) \n //vec3 look_u = camera_pos + vec3(-sin(azimuth), cos(azimuth), 0.0);\n //vec3 look_v = camera_pos + vec3(sin(altitude)*-cos(azimuth), sin(altitude)*-sin(azimuth), cos(altitude)); \n\n \n // turns out analytically these aren't correct. 
so using cross instead -.-\n vec3 look_u = normalize(cross(vec3(0.0, 0.0, -1.0), look_dir));\n vec3 look_v = normalize(cross(camera_pos, look_u)); // is this faster?\n // camera plane(origin of each pixel) -> barycentric?\n \n vec3 camera_plane;\n vec3 ray_dir;\n vec3 ray_origin;\n \n if (FOV > 0.0){\n // assume a pinhole camera.\n // FOV is the horizontal fov, the given focal length becomes:\n // the 1.0 is the sensor height.\n float focal_length = 1.0/tan(radians(FOV*0.5));\n \n // the ro\n camera_plane = camera_pos - (look_dir*focal_length) + ((look_u*uv.x) + (look_v*uv.y))*-1.0; // inverted here to see upright\n ray_origin = camera_pos;\n \n // the rd\n ray_dir = camera_pos-camera_plane;\n ray_dir = normalize(ray_dir); \n }\n \n else {\n // negative FOV values are interpreted as a sensor size for a orthographic camera!\n // horizontal sensor size, -1 would be something sensible... everything else is far away\n float sensor_size = FOV*0.5*-1.0;\n camera_plane = camera_pos + ((look_u*uv.x)+(look_v*uv.y))*sensor_size; // wider fov = larger \"sensor\"\n ray_dir = look_dir;\n ray_origin = camera_plane;\n }\n \n Ray camera = Ray(ray_origin, ray_dir, 1.0/ray_dir);\n \n // actual stuff happening:\n vec2 cloud_val;\n vec4 res = raycast(camera, cloud_val);\n // fragColor = vec4(vec3(res.rgb),1.0);\n //return; // early debug exit\n if (res.a < 0.0) {\n // we missed the initial terrain\n res = sampleGround(ray_origin, ray_dir);\n \n // TODO: the skybox hit returns a negative distance, so we need to handle that\n //res.a = abs(res.a);\n }\n vec3 hit = ray_origin + (ray_dir*res.a);\n\n // TODO: offset the ro a bit?\n Ray sun_check = Ray(hit+0.001*SUN, SUN, 1.0/SUN);\n \n vec2 cloud_foo; // unused?\n vec4 ref = raycast(sun_check, cloud_foo).rgba; //reflection (the full shadow) \n ref.rgb *= 1.0 - step(0.0, ref.a); // this makes misses black?\n // ref.rgb *= 1.0-exp(-shadow_cloud*15.0); // more \"realistic\" cloud shadow?\n \n float shadow_amt = shadow(sun_check);\n // 
actually more light amount -.-\n // so we add and \"ambient\" base like here\n vec3 col = res.rgb * max(0.3, shadow_amt);\n\n\n // distance fog? I don't like it so it's commented out\n // float dist_fog = transmittance(res.a *0.015);\n // vec3 fog_col = vec3(0.4, 0.5, 0.9);\n // col = mix(col, fog_col, 1.0-dist_fog);\n \n\n float cloud_trans = transmittance(cloud_val.x);\n vec3 cloud_col = vec3(1.0 - cloud_val.y*0.35); // *(1.0-cloud_trans); // no more alphapremultiplication...\n col = mix(col, cloud_col, 1.0-cloud_trans);\n \n // TODO: better \"shadow\" value via actually colored shadow??\n // vec3 col2 = res.rgb + ref.rgb*0.3; \n // col = vec3(uv.x > 0.0 ? col.rgb : col2.rgb);\n \n fragColor = vec4(vec3(col),1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [
{
"id": 33,
"src": "/presets/tex00.jpg",
"ctype": "keyboard",
"channel": 1,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 257,
"channel": 0
}
],
"code": "// for the terrain map I combined a bunch of existing and tutorial level snippets\n\n// water simulation controls:\n// SPACE - toggle all simulaion on/off :: default:on\n// E - Erosion simulation toggle on/off :: default:off\n// R - Rain for low clouds toggle on/off :: defualt:on\n\n\n# define OCTAVES 8\n\n// from https://www.shadertoy.com/view/XlGcRh\nuvec2 pcg2d(uvec2 v)\n{\n v = v * 1664525u + 1013904223u;\n\n v.x += v.y * 1664525u;\n v.y += v.x * 1664525u;\n\n v = v ^ (v>>16u);\n\n v.x += v.y * 1664525u;\n v.y += v.x * 1664525u;\n\n v = v ^ (v>>16u);\n\n return v;\n}\n\n// wraps the bitconversion and just accessses half the values\nfloat hash21(in vec2 a){\n uvec2 b = uvec2(floatBitsToUint(a.x), floatBitsToUint(a.y));\n uvec2 c = pcg2d(b);\n float r = float(c.x)/float(uint(-1)); // seems to work I guess... but don't we lose a lot of data??\n // fract(uintBitsToFloat(c.x)) // this one causes issues due to NaN or something, results in black spots in the noise\n return r;\n}\n\n// TODO to make the clouds more accurate, this should actually be a slice of 3D noise we rotate through\nfloat noise(in vec2 a){\n // perlin 2D noise\n vec2 i = floor(a);\n vec2 f = fract(a);\n \n // four corners\n float bl = hash21(i + vec2(0.0, 0.0));\n float br = hash21(i + vec2(1.0, 0.0));\n float tl = hash21(i + vec2(0.0, 1.0));\n float tr = hash21(i + vec2(1.0, 1.0));\n \n vec2 s = smoothstep(0.0, 1.0, f);\n \n \n return mix( mix(bl, br, s.x),\n mix(tl, tr, s.x), s.y);\n}\n\n\nfloat noise3d(in vec3 a){\n float i = floor(a.z);\n float n1 = noise(a.xy+vec2(i));\n float n2 = noise(a.xy+vec2(i+1.0));\n \n float s = smoothstep(0.0, 1.0, fract(a.z));\n return mix(n1, n2, s);\n}\n\n// via https://thebookofshaders.com/13/\nfloat fbm(in vec2 p){\n \n // parameters\n int octaves = OCTAVES;\n float l = 2.0;\n float g = 0.5;\n \n // initial values\n float a = 0.5;\n float f = 1.0;\n float res = 0.0;\n for(int i = 0; i < octaves; i++){\n res += a * noise(p*f);\n f *= l;\n a *= g;\n \n }\n 
return res;\n}\n\nfloat fbm3(in vec3 p){\n // parameters\n int octaves = OCTAVES;\n float l = 2.0;\n float g = 0.5;\n \n // initial values\n float a = 0.5;\n float f = 1.0;\n float res = 0.0;\n for(int i = 0; i < octaves; i++){\n res += a * noise3d(p*f);\n f *= l;\n a *= g;\n \n }\n return res;\n}\n\n\nvec4 init_terrain(vec2 uv, float time_seed){\n // initialize the terrain?\n\n vec4 start;\n // we don't do anything interesting here :(\n float height = fbm(uv*3.0+vec2(time_seed*0.2));\n \n // let's have some fun!\n float clouds = fbm(uv*4.0+vec2(-time_seed*0.1));\n \n // water as an amount, not a height.\n float water = max(0.0, 0.2-height);\n\n // alpha channel currently not used...\n start = vec4(vec3(height, clouds, water),1.0);\n return start;\n}\n\n// idea... look at the the neighbords and then check if water exists in the highest point.\n// step the water down (by full amount) or based on the gradient?\nvec2 simulate_water(ivec2 pos){\n // do we get the clouds to know where it rains?\n vec4 old = texelFetch(iChannel0, pos, 0); // these could be passed in? 
\n float old_water = old.z;\n float old_height = old.x;\n float old_cloud = old.y; // can we evaporate to collect and rain to lose cloud?\n float old_wind = old.w; // like radians or amplitude?\n \n // maybe only evaporate if there isn't clouds?\n float evaporation = iTimeDelta*(max(0.0,(3.5*old_height)-0.3));\n old_water *= (1.0-evaporation); // or last?\n \n float water_level = old_height + old_water; \n float water_change = 0.0; // the amount \"added\" or removed\n float height_change = 0.0; //erosion and deposition!\n \n \n \n // rain, toggle with R\n float rain_toggle = 1.0 - texelFetch(iChannel1, ivec2(82, 2), 0).x; \n if (old_cloud < 0.25) {\n // could be based on cloud thickness, maybe even drain the clouds?\n float rain_amount = abs(min(0.0, (old_cloud - 0.25)));\n water_change += iTimeDelta*rain_amount*rain_toggle;\n } \n \n // just 4 neighbors is hopefully fine, all 8 would require some sqrt(2) scaling?\n ivec2 neighbors[4] = ivec2[4] (ivec2(1,0), ivec2(-1,0), ivec2(0,1), ivec2(0,-1));\n //ivec2 neighbors[8] = ivec2[8] (ivec2(1,0), ivec2(1,1), ivec2(0,1), ivec2(-1,1), ivec2(-1,0), ivec2(-1,-1), ivec2(0,-1), ivec2(1,-1));\n int i;\n for (i=0; i<neighbors.length(); i++){ \n vec4 n = texelFetch(iChannel0, pos+neighbors[i], 0);\n //float slope = n.x - old_height;\n float n_level = n.x + n.z; // terrain height + water height\n float water_diff = n_level - water_level;\n water_change += clamp(water_diff, -old_water, n.z);\n //height_change += -water_diff*slope; // this is not it -.-\n }\n \n water_change /= float(neighbors.length()); // does this need to be normalized?\n height_change /= float(neighbors.length());\n \n \n // motivated by the ideas here: https://www.youtube.com/watch?v=eaXk97ujbPQ\n // erosion demo: toggle with E\n float erosion_toggle = texelFetch(iChannel1, ivec2(69, 2), 0).x; \n height_change += -(old_water*water_change);\n height_change *= erosion_toggle;\n \n return vec2(old_height + height_change, old_water + 
water_change);\n}\n\n\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n \n vec2 uv = (fragCoord * 2.0 - iResolution.xy)/iResolution.y;\n ivec2 st = ivec2(fragCoord);\n \n if (fragCoord.x > iResolution.y || fragCoord.x > 512.5 || fragCoord.y > 512.5){\n // will break on protrait aspect ratio -.-\n discard; // throw away the threads that are outside the simulation area\n }\n \n //TODO terrain can still move if we offset the sample coords!\n vec4 prev = texelFetch(iChannel0, st, 0);\n if (iFrame < 1 || prev.x <= 0.0){ // hack for resizing? -> still messes up uv scaling...\n prev = init_terrain(uv, 0.0); // TODO: change the value here to something like iDime.w to et different starting terrains.\n }\n else {\n // let's have some fun!\n float clouds = fbm3(vec3(uv*5.0+vec2(-iTime*0.2),iTime*0.1)); //\n \n prev.y = clouds;\n // press spacebar to toggle water sim (rain, gravity and evaporation) on/off... stars on on.\n if (texelFetch(iChannel1, ivec2(32, 2), 0).x<0.5){\n prev.xz = simulate_water(st);\n }\n }\n prev = clamp(prev, vec4(0.0), vec4(1.0)); // maybe that fixes the big black box showing up sometimes?\n \n \n \n // clouds = uv.y; //ramp for testing\n // the blue channel might be water... (and we could animate/simulate it here!)\n //fragColor = vec4(vec3(height, clouds, water),1.0);\n \n fragColor = prev;\n}",
"name": "Buffer A",
"description": "",
"type": "buffer"
}
]
},
{
"ver": "0.1",
"info": {
"id": "tXK3Rd",
"date": "1752868197",
"viewed": 127,
"name": "interactive broken LCD",
"username": "jakel101",
"description": "The lowest-effort idea on my massive list of shader ideas. Use the mouse start and end positions to break it in another way.\nYou can also put this in the image pass for other existing shaders!",
"likes": 9,
"published": 3,
"flags": 32,
"usePreview": 0,
"tags": [
"postfx",
"broken",
"lcd",
"discord"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents [!__!] \n// this is meant as a effects pass (image pass)\n// where it reads some background and make it look sorta broken\n// improvements and remixes welcome!\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // Normalized pixel coordinates (from 0 to 1)\n vec2 uv = fragCoord/iResolution.xy;\n vec2 mouse_start = abs(iMouse.zw)/iResolution.xy;\n vec2 mouse_end = iMouse.xy/iResolution.xy;\n \n // default values for before you touch mouse first time!\n if (iMouse.x == 0.0 && iMouse.z == 0.0){\n mouse_start = vec2(0.2);\n mouse_end = vec2(0.2);\n }\n \n // to capture the background, we distor the uv a bit for extra fun\n // disorted uv along the vertical axis\n vec2 distoriton = uv;\n \n // this one moves but has glitchy edges\n float fuzzy = clamp(tan(iTime*iDate.w),-0.3, 0.1);\n float strength_mask = 1.0 -clamp(abs(uv.y*2.0 - mouse_end.y-0.3)*(fuzzy+3.0), 0.0, 1.0);\n float offset = exp(sin(distoriton.y*30000.0*iTime))*max(-0.3, cos(iTime*0.33)*4.0);\n distoriton.y += offset*strength_mask;\n \n // this goes negative or positive a few times but stays at 0 most of the time...\n float sporadic = min(sin(mod(iTime, 3.4)), 0.0) - min(cos(mod(iTime*.3, 1.6)), 0.0)*sign(cos(iTime*3.0));\n \n distoriton.x += (0.5-uv.y)*sign(sporadic)*0.1;\n vec3 good = texture(iChannel0, distoriton).rgb; // TODO: different name\n\n\n // broken area\n vec3 bad = vec3(1.0) - good;\n bad = normalize(bad);\n bad -= fract(uv.x * 73.8 + sin(iTime*0.04-uv.x*914.0035))* 10.0; // pseudo hash that looks good enough right now\n //bad = max(vec3(0.0), bad);\n bad.b *= 64.0;\n bad.g *= uv.x*mouse_end.x;\n bad.b += (tan((mouse_start.y - uv.y)*1.0));\n \n bad = clamp(bad, vec3(0.0), vec3(1.0));\n\n \n //bad = normalize(bad);\n // todo additional distortions, streaks etc \n vec3 bad2 = vec3(1.0 - bad);\n bad2 = pow(bad2, good*0.2); \n bad2 -= fract(uv.y * 73.8 + sin(iTime*0.04-uv.y*914.0035))* 10.0;\n bad2 *= cross(vec3(mouse_start.xy, sign(cos(iTime*0.8))), 
normalize(vec3(sin(iTime*0.3),uv.yx)));\n \n bad2 = clamp(bad2, vec3(0.0), vec3(1.0)); \n\n\n // simple polynomals, offset by the mouse positions. Have a root in 0.0... could be more dynamic\n // TODO: more dynamic?\n \n //abs(x)*(x*0.1 -1)*0.2\n float f1 = abs((uv.x-mouse_start.x))*((uv.x-mouse_start.x)*1.5 -1.0)*0.2+mouse_start.y;\n f1 += fuzzy*0.03*abs(mouse_end.x);\n //float f1 = 0.12*((uv.x-mouse_start.x)-2.0)*-(pow(((uv.x-mouse_start.x)-2.0),3.0)+9.0)-0.24+mouse_start.y;\n float f2 = abs((uv.y-mouse_end.y))*((uv.y-mouse_end.y)*1.2 -1.0)*0.2+mouse_end.x;\n f2 += fract(sin(40.0 +uv.y*mouse_start.y*523.3))*0.002; // cheap \"cracked edge\"\n //float f2 = 3.0*((uv.y-mouse_end.y) * -(uv.y-mouse_end.y)) *(0.84*uv.y-mouse_end.y) + mouse_end.x; \n \n float mask1 = float((f1 > uv.y));\n float mask2 = float((f2 > uv.x)); \n \n vec3 col = good;\n col = mix(col, bad, mask1);\n col = mix(col, bad2, mask2);\n col = mix(col, bad+bad2, mask1*mask2); // awful overlap?\n // Output to screen\n fragColor = vec4(vec3((col)),1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [],
"outputs": [
{
"id": 257,
"channel": 0
}
],
"code": "// forked from https://www.shadertoy.com/view/tXV3Rw\n// changes:\n// parameterized colors\n// expanded style to fit mine...\n// made smaller, changed colors,\n// added little glow animation\n\n// yes, join the Discord!! https://discord.gg/XtmMN6E\n// it's official, see the bottom left corner\n\n\n// goal is to get the apple logo kinda look, ref by \n// https://discord.com/channels/578696555612209173/579528698164805634/1395607001274515508\n\nfloat sdEllipse(vec2 p, vec2 r)\n{\n float f = length(p / r),\n g = length(p / r / r);\n \n return f * (f - 1.) / g;\n}\n\nfloat sdDiscord(vec2 p)\n{\n p.x = abs(p.x);\n \n float d = length(p + vec2(0, .52)) - .91;\n d = max(d, length(p - vec2(0, .41)) - .83);\n d = max(d, length(p - vec2(.82, .09)) - .74);\n d = max(d, min(.54 - length(p - vec2(0, .21)), \n -(.81 * p.x + p.y + .03) / length(vec2(.81, 1))));\n d = min(d, max(length(p - vec2(0, .3)) - .59,\n length(p + vec2(0, .36)) - .7));\n d = max(d, length(p + vec2(.34, .16)) - .84);\n d = max(d, -sdEllipse(p - vec2(.165, -.038), vec2(.09, .1)));\n \n return d;\n}\n\nvoid mainImage(out vec4 fragColor, vec2 fragCoord)\n{\n vec2 uv = (fragCoord - .5 * iResolution.xy) / iResolution.y;\n \n float d = sdDiscord(uv*2.0);\n \n vec3 logo_col = vec3(0.878, 0.89, 1.0);\n vec3 back_col = vec3(0.345, 0.396, 0.949);\n vec3 dark_col = vec3(0.07, 0.07, 0.08); //~#121214\n\n // cheap glow\n float glow = exp(-d*50.0);\n \n vec3 col = mix(logo_col, dark_col, smoothstep(-1., 1., d * iResolution.y));\n col += glow*((sin(iTime)*0.4)+0.5);\n \n fragColor = vec4(col, 1.0);\n}",
"name": "Buffer A",
"description": "",
"type": "buffer"
}
]
},
{
"ver": "0.1",
"info": {
"id": "tXc3D7",
"date": "1751330967",
"viewed": 717,
"name": "checkerboard with only mix()",
"username": "jakel101",
"description": "What if you are only allowed to use one built-in function?\n\nWhich other function has potential?",
"likes": 2,
"published": 3,
"flags": 0,
"usePreview": 0,
"tags": [
"justmix",
"onlymix",
"onefunction"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "/*#=#=#=#=#=#=#=#=#=#=#=#=#=#=\n* The One Function Challenge\n* #=#=#=#=#=#=#=#=#=#=#=#=#=#=\n* \n* only use a single built in function\n*\n* !RULES:\n* + constructors, literals, uniforms are OKAY\n* + array subscript and swizzling are FINE\n* - functions and operators (including unary -, comparison) are NOT ALLOWED\n* ~ control flow (for, while, if, else, ...) is ALRIGHT (but can we do without?)\n* LIMITATIONS:\n* > since there will be no subsitutes for texture sampling or bitcasting,\n* > it will be Image pass only, and no input Channels (open for debate)\n*\n*\n* /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\\n* First Edition: only mix\n* \\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n*\n* https://registry.khronos.org/OpenGL-Refpages/es3.0/html/mix.xhtml\n* >> mix(a,b,x) = a*(1-x) + b*x\n* We have recently been told to not use mix as an optimization:\n* https://iquilezles.org/articles/gpuconditionals/ so hear me out.\n* What if we ONLY use mix? Contributions, Submissions and Improvements welcome!\n* \n* self link: https://www.shadertoy.com/view/tXc3D7\n*/\n\n\n\n\n// the following is my attempt to do at least a checkerboard and see how far I can go.\n// If you don't want to be spoiled and have the fun of discovery yourself, don't scroll down!\n// thanks to @Coderizer for brainstorming the start, @Cottrezz and @diatribes for feedback.\n// please join the discussion below or better yet on the official Discord.\n// yes there is an official Shadertoy discord, scroll down on the website invite link is bottom left corner\n// link to where my idea started (and discussion followed):\n// https://discord.com/channels/578696555612209173/578696556069257231/1389206125584449697\n\n\n// just as a joke, we can even define mix by it's implementation\n# define MIX(a,b,x) (a*(1-x) + b*x)\n\n// arithmetic\n# define MUL(a,b) mix(0.0, a, b)\n# define NEG(a) mix(a, 0.0, 2.0)\n# define ADD(a,b) MUL(mix(a,b, 0.5), 2.0)\n# define SUB(a,b) ADD(a, NEG(b))\n// DIV still missing (without 
some kind of loop)\n\n// logic\n# define NOT(a) bool(mix(1.0, 0.0, float(bool(a))))\n# define OR(a,b) bool(ADD(float(bool(a)), float(bool(b))))\n# define AND(a,b) NOT(OR(NOT(a), NOT(b)))\n# define XOR(a,b) AND(OR(a,b), NOT(AND(a,b)))\n\n// conditionals\n# define EQ(a,b) bool(NOT(bool(SUB(a,b))))\n\n\n// shader specific\n# define FLOOR(a) float(int(a))\n# define FRACT(a) SUB(a,FLOOR(a))\n# define STEP(a,x) float(bool(uint(ADD(SUB(x,a),1.0))))\n// STEP also works as LEQ (less eqaul than) I think\n// NOTICE: uint(-1.0) is undefined behaviour and therefore not the greatest resource\n\n// exapand formulas to vectors where needed.\n# define MUL2(a,b) vec2(MUL(a.x, b.x), MUL(a.y, b.y))\n# define FRACT2(a) vec2(FRACT(a.x), FRACT(a.y))\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // I wanted to have it be an 8x8 board but game up here\n float cell_width = MUL(iResolution.x, 0.25);\n float cell_height = MUL(iResolution.y, 0.25);\n \n float cell_x = STEP(MUL(iResolution.x, 0.5), fragCoord.x);\n float cell_y = STEP(MUL(iResolution.y, 0.5), fragCoord.y);\n \n vec3 col;\n \n col = vec3(XOR(cell_x, cell_y)); \n fragColor = vec4(col,1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
}
]
},
{
"ver": "0.1",
"info": {
"id": "wcySRc",
"date": "1750458246",
"viewed": 104,
"name": "3D waterfall audio",
"username": "jakel101",
"description": "First idea implementing: https://www.shadertoy.com/view/M3VBWt\nWill behave differently on your resolution and framerate; let me know what works and what doesn't.",
"likes": 8,
"published": 3,
"flags": 32,
"usePreview": 0,
"tags": [
"3d",
"audio"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents \\_%_/\n\n/* Image pass presents the texture calculated in Buffer A \n* this is adapted from my heightmap shader:\n* https://www.shadertoy.com/view/M3VBWt\n* pretty much in progress but this is one of the ideas I have had!\n* feedback/improvements welcome here or on the original.\n*/\n\n\n# define PI 3.141592653\n# define HEIGHT_SCALE 0.5\n\n// resolution of the sampled area limit Y to some number smaller than iResolution.y to change the \"speed\"\n# define CELLS ivec2(iChannelResolution[0].x, min(iChannelResolution[0].y,512.0))\n\n// unsure yet where to bring this!\n# define SUN normalize(vec3(sin(iDate.w*0.5), cos(iTime), HEIGHT_SCALE*1.5))\n// normalize(vec3(3.0, -5.0, 2.0))\n\n// horizontal FOV, if you use negative values the camera will be orthographic!\n// examples:\n// FOV -1.0 for orthographic (sensor size)\n// FOV 90.0 for perspective wide\n// FOV 45.0 for perspective narower\n# define FOV 90.0\n\n\nivec2 worldToCell(vec3 p) {\n // move world space again\n p += 1.0;\n p *= 0.5;\n ivec2 st = ivec2((p.xy*vec2(CELLS.xy)));\n // TODO: find an actual solution to the edge cases!\n st = min(st, CELLS -1);\n return st;\n}\n\nstruct Ray{\n vec3 origin;\n vec3 dir;\n vec3 inv_dir; // for speedup?\n};\n\nstruct HitInfo{\n bool hit;\n // rest illdefined for a miss\n bool inside;\n vec3 entry;\n vec3 exit;\n vec3 entry_norm;\n vec3 exit_norm;\n float entry_dist;\n float exit_dist;\n};\n\n// sorta reference: https://tavianator.com/2022/ray_box_boundary.html\nHitInfo AABB(vec3 center, vec3 size, Ray ray){\n HitInfo res;\n\n vec3 pos = center + size;\n vec3 neg = center - size;\n\n vec3 pos_dist = (pos-ray.origin) * ray.inv_dir;\n vec3 neg_dist = (neg-ray.origin) * ray.inv_dir;\n\n vec3 min_dist = min(pos_dist, neg_dist);\n vec3 max_dist = max(pos_dist, neg_dist);\n\n res.entry_dist = max(max(min_dist.x, min_dist.y), min_dist.z);\n res.exit_dist = min(min(max_dist.x, max_dist.y), max_dist.z);\n\n // normals point away from the center\n 
res.entry_norm = -sign(ray.dir) * vec3(greaterThanEqual(min_dist, vec3(res.entry_dist)));\n res.exit_norm = sign(ray.dir) * vec3(lessThanEqual(max_dist, vec3(res.exit_dist)));\n\n // essentially methods?\n res.entry = ray.origin + ray.dir*res.entry_dist;\n res.exit = ray.origin + ray.dir*res.exit_dist;\n\n res.hit = res.entry_dist < res.exit_dist && res.exit_dist > 0.0;\n res.inside = res.entry_dist < 0.0; // entry behind us\n\n return res;\n}\n\n\nHitInfo pillar_hits(ivec2 cell, float height, Ray ray){\n // let's move the pillar into world space by having it's center + extends\n\n vec3 extend = vec3(1.0/vec2(CELLS), abs(height)*0.5);\n vec3 p = vec3(cell.xy, abs(height)*0.5);\n p.xy *= extend.xy;\n p.xy *= 2.0;\n p.xy -= 1.0 - extend.xy; // not quite the offset?\n //extend.z = extend.y; // make them cubes?\n\n // for the case of clouds the box is at the top?\n if (height < 0.0){\n p.z = HEIGHT_SCALE*(1.0-abs(height*0.5));\n }\n\n // TODO: redo this math when less asleep...\n HitInfo res = AABB(p, extend, ray);\n return res;\n}\n\n\nvec4 sampleHeight(ivec2 cell){\n // to allow for more complex math to determine height\n // .rgb should just return the texture color or some modification of it\n //cell.x = (cell.x + iFrame) % int(iChannelResolution[0].x); // fun texture scroll\n vec4 tex = texelFetch(iChannel0, cell, 0);\n vec4 res;\n res.a = (tex.r + tex.g + tex.b)/3.0;\n res.rgb = tex.rgb; // * res.a; // to make it more of a \"height\" map?\n //res.rgb = vec3(0.5);\n //res.a = tex.a; // use existing height data?\n res.a *= HEIGHT_SCALE;\n return res;\n}\n\n\nvec4 raycast(Ray ray){\n // cast the ray untill there is a hit or we exit the box\n // \"any hit\" shader?\n // returns tex + dist, negative dist means a \"miss\"\n // the inout for clouds sums up it's distance and depth of clouds.\n HitInfo box = AABB(vec3(0.0, 0.0, HEIGHT_SCALE*0.5), vec3(1.0, 1.0, HEIGHT_SCALE*0.5), ray);\n\n vec3 entry = box.entry;\n\n if (!box.hit){\n // if we \"MISS\" the whole box (not 
inside?).\n\n return vec4(vec3(0.2, 0.8, 0.0), -abs(box.exit_dist));\n }\n // everything below here is inside the box\n if (box.inside){\n // if we are \"inside\" the entry should just be ro!\n entry = ray.origin; // maybe problems with distance caluclations at the end?\n }\n\n //return vec4(vec3(0.6), 1.0);\n\n //return entry.rgbb;\n\n ivec2 current_cell = worldToCell(entry); // TODO: this one is problematic!\n int i;\n ivec2 max_cells = CELLS - min(current_cell, CELLS-current_cell);\n int max_depth = (max_cells.x + max_cells.y)+2; // could also be min!\n for (i = 0; i < max_depth; i++){\n if (current_cell.x < 0 || current_cell.x >= CELLS.x ||\n current_cell.y < 0 || current_cell.y >= CELLS.y){\n // we marched far enough are are \"outside the box\" now!\n return vec4(vec3(0.4), -abs(box.exit_dist));\n }\n\n vec4 tex = sampleHeight(current_cell);\n HitInfo pillar = pillar_hits(current_cell, tex.a, ray);\n\n if (pillar.hit) {\n // \"any hit\" (side/top/bot) -> loop ends here\n // do a little bit of light sim by doing diffuse \"block of chalk\"\n vec3 col = tex.rgb;\n // half the phong diffuse\n // TODO: assume some base \"emissive\" quality to all pillars (or scaled with some value?)\n // needs better hit model and shader to accumulate over a few traces.\n // TODO: should one of them be negative?\n col *= (2.0*max(0.0, dot(pillar.entry_norm, SUN)))+0.2; // where does the 2.0 factor came from?\n return vec4(col, abs(pillar.entry_dist));\n }\n\n // check if our exit distance larger than the box, means we should be at the final pillar...\n if (pillar.exit_dist >= box.exit_dist){\n return vec4(vec3(0.8), -abs(pillar.exit_dist));\n }\n\n // the step\n ivec2 next_cell = current_cell + ivec2(pillar.exit_norm.xy);\n if (next_cell == current_cell){\n // in this case we do another raycast - but without any Z component\n // so the vector is sideways and points to a new cell!\n vec3 flat_rd = vec3(ray.dir.xy, 0.0);\n Ray flat_ray = Ray(ray.origin, flat_rd, 1.0/flat_rd);\n\n 
HitInfo grid = pillar_hits(current_cell, 1.0, flat_ray);\n next_cell += ivec2(grid.exit_norm.xy); // TODO check if this norm is correct!\n }\n // for next iteration\n current_cell = next_cell;\n }\n //return vec4(vec2(current_cell)/vec2(CELLS), 0.0, 0.0);\n // defualt \"miss\"? -> like we exit the box?\n\n return vec4(vec3(1,0,0), -abs(box.exit_dist));\n\n}\n\n// more like a bad shadowmap\n// idea for the future: precompute the horizon per pixel: https://youtu.be/LluCbGdi-RM\nfloat shadow(Ray sun_ray){\n // return the amount of shadowed?\n // we are now marching upwards from some hit\n // ro is essentially the point we started from\n // rd is the sun angle\n vec4 res = raycast(sun_ray);\n //return res.a;\n if (res.a < 0.0){// || (ro + rd*res.a).z >= HEIGHT_SCALE){\n return 1.0; // miss means full sunlight!\n }\n else {\n // TODO: use distance?\n return 0.5; // additional ambient light from here?\n }\n}\n\n// copied from https://www.shadertoy.com/view/M3jGzh\nfloat checkerboard(vec2 check_uv, float cells){\n check_uv *= cells/2.0;\n float rows = float(mod(check_uv.y, 1.0) <= 0.5);\n float cols = float(mod(check_uv.x, 1.0) <= 0.5);\n return float(rows == cols);\n}\n\nvec4 sampleGround(vec3 ro, vec3 rd){\n // for any ray that misses the heightmap\n // TODO: rename to sample skybox maybe? as the ground is sorta part of that...\n float ground_height = 0.0;\n float ground_dist = (ground_height-ro.z)/rd.z;\n if (ground_dist < 0.0) {\n // essentially sky hit instead?\n // just some random skybox right now... could be improved of course!\n vec3 col = vec3(0.23, 0.59, 0.92)*exp(dot(SUN, rd)-0.8);\n col = clamp(col, vec3(0.0), vec3(1.0));\n return vec4(col, 30.0); // some random distance that is positive!\n }\n\n vec3 ground_hit = ro + (rd * ground_dist);\n\n float val = checkerboard(ground_hit.xy, 8.0)* 0.1;\n val += 0.45;\n //val *= 2.0 - length(abs(ground_hit));\n\n // fake sun angle spotlight... 
TODO actual angle and normal calculation!\n val *= 2.5 - min(2.3, length((-SUN-ground_hit)));//,vec3(0.0,0.0,1.0));\n\n vec3 col = vec3(val);\n return vec4(col, ground_dist);\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // uv normalized to [-1..1] for height with more width\n vec2 uv = (2.0*fragCoord - iResolution.xy)/iResolution.y;\n vec2 mo = (2.0*iMouse.xy - iResolution.xy)/iResolution.y;\n\n //fragColor = texture(iChannel0, uv);\n //return;\n\n // for when it's just idling...\n float azimuth = -1.9+iTime*0.1 + mo.x; // keeps a bit of residue of the mouse!\n float altitude = 0.7+cos(1.5+iTime*0.25)*0.35;\n if (sign(iMouse.z) > 0.0){\n // orbiting camera setup\n azimuth = PI*mo.x;\n altitude = 0.5*PI*clamp(mo.y+1.0, -0.01, 0.99); // maybe just positive?\n }\n\n // make sure you don't look \"below\"\n altitude = clamp(altitude, HEIGHT_SCALE*0.2, PI);\n\n // a unit length orbit!\n vec3 camera_pos = vec3(\n cos(azimuth)*cos(altitude),\n sin(azimuth)*cos(altitude),\n sin(altitude));\n // the camera is always looking \"at\" the origin or half way above it\n vec3 look_dir = normalize(vec3(0.0, 0.0, HEIGHT_SCALE*0.5) - camera_pos);\n\n\n // TODO moving the camera in and out over time??\n camera_pos += look_dir * -0.75; // moving the camera \"back\" to avoid occlusions?\n // two vectors orthogonal to this camera direction (tagents?)\n //vec3 look_u = camera_pos + vec3(-sin(azimuth), cos(azimuth), 0.0);\n //vec3 look_v = camera_pos + vec3(sin(altitude)*-cos(azimuth), sin(altitude)*-sin(azimuth), cos(altitude));\n\n\n // turns out analytically these aren't correct. 
so using cross instead -.-\n vec3 look_u = normalize(cross(vec3(0.0, 0.0, -1.0), look_dir));\n vec3 look_v = normalize(cross(camera_pos, look_u)); // is this faster?\n // camera plane(origin of each pixel) -> barycentric?\n\n vec3 camera_plane;\n vec3 ray_dir;\n vec3 ray_origin;\n\n if (FOV > 0.0){\n // assume a pinhole camera.\n // FOV is the horizontal fov, the given focal length becomes:\n // the 1.0 is the sensor height.\n float focal_length = 1.0/tan(radians(FOV*0.5));\n\n // the ro\n camera_plane = camera_pos - (look_dir*focal_length) + ((look_u*uv.x) + (look_v*uv.y))*-1.0; // inverted here to see upright\n ray_origin = camera_pos;\n\n // the rd\n ray_dir = camera_pos-camera_plane;\n ray_dir = normalize(ray_dir);\n }\n\n else {\n // negative FOV values are interpreted as a sensor size for a orthographic camera!\n // horizontal sensor size, -1 would be something sensible... everything else is far away\n float sensor_size = FOV*0.5*-1.0;\n camera_plane = camera_pos + ((look_u*uv.x)+(look_v*uv.y))*sensor_size; // wider fov = larger \"sensor\"\n ray_dir = look_dir;\n ray_origin = camera_plane;\n }\n\n Ray camera = Ray(ray_origin, ray_dir, 1.0/ray_dir);\n\n // actual stuff happening:\n vec4 res = raycast(camera);\n // fragColor = vec4(vec3(res.rgb),1.0);\n //return; // early debug exit\n if (res.a < 0.0) {\n // we missed the initial terrain\n res = sampleGround(ray_origin, ray_dir);\n\n // TODO: the skybox hit returns a negative distance, so we need to handle that\n //res.a = abs(res.a);\n }\n vec3 hit = ray_origin + (ray_dir*res.a);\n\n // ro is a bit offset to reduce start intersections that are noisey ... 
want a better solution one day.\n Ray sun_check = Ray(hit+0.001*SUN, SUN, 1.0/SUN);\n\n vec4 ref = raycast(sun_check).rgba; //reflection (the full shadow)\n ref.rgb *= 1.0 - step(0.0, ref.a); // this makes misses black?\n // ref.rgb *= 1.0-exp(-shadow_cloud*15.0); // more \"realistic\" cloud shadow?\n\n float shadow_amt = shadow(sun_check);\n // actually more light amount -.-\n // so we add and \"ambient\" base like here\n vec3 col = res.rgb * max(0.6, shadow_amt);\n\n\n // distance fog? I don't like it so it's commented out\n // float dist_fog = transmittance(res.a *0.015);\n // vec3 fog_col = vec3(0.4, 0.5, 0.9);\n // col = mix(col, fog_col, 1.0-dist_fog);\n\n // TODO: better \"shadow\" value via actually colored shadow??\n // vec3 col2 = res.rgb + ref.rgb*0.3;\n // col = vec3(uv.x > 0.0 ? col.rgb : col2.rgb);\n\n fragColor = vec4(vec3(col),1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [
{
"id": 21,
"src": "/media/a/ec8a6ea755d34600547a5353f21f0a453f9f55ff95514383b2d80b8d71283eda.mp3",
"ctype": "music",
"channel": 1,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 257,
"channel": 0
}
],
"code": "// Buffer A read the music and holds a ringbuffer past.\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy; \n ivec2 st = ivec2(fragCoord);\n // buffer length is essentially iResolution.y\n \n vec4 samp = vec4(0.0); \n if (st.x > 0) {\n // previous state, shifted by 1\n samp = texelFetch(iChannel0, st-ivec2(1,0), 0);\n \n \n // as an alternative, you can use this 2nd line here to get smoothing for free\n // as the texture is sampled with linear.\n //samp = texture(iChannel0, uv-vec2(1.0/iResolution.y, 0.0)); \n }\n else {\n // new value in .r and .b\n samp.x = texelFetch(iChannel1, ivec2(st.y, 0), 0).x;\n samp.y = 0.2 * texelFetch(iChannel1, ivec2(st.y, 1), 0).x;\n }\n \n // just to have something in the channel since it's also used to calculate height!\n samp.z = 1.0 - uv.x; \n fragColor = vec4(samp);\n}",
"name": "Buffer A",
"description": "",
"type": "buffer"
}
]
},
{
"ver": "0.1",
"info": {
"id": "wc33WX",
"date": "1747616947",
"viewed": 222,
"name": "Random Beat every min - 87 years",
"username": "jakel101",
"description": "Discussed this idea on Discord the other night and decided to sit down and implement it. Still needs better instruments!\nCan get out of sync after ~1 min, so just reload/recompile and you get something new.\n\n(instrument improvements in progress)",
"likes": 9,
"published": 3,
"flags": 8,
"usePreview": 0,
"tags": [
"random",
"audio",
"drums"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents =_=\n// attributions welcome!\n// would love to hear abour your interpretation for this!\n\n\n/*\n* \n* Image pass holds the visualization, it's might get out of sync with the music for now :/\n* Common pass holds the generation logic\n* Sound pass plays the instruments for 3 minutes.\n* recompile to get new patterns!\n* this should only repeat every ~ 87 years so enjoy some unique patterns!\n*/\n\n# define LINE 0.005\n// TODO: better pixel width\n\n// informed by: https://youtu.be/PMltMdi1Wzg\nfloat sdLineSegment(vec2 p, vec2 a, vec2 b) {\n // pos, start, end;\n float d; \n // how far along we are between the points. \n float h = clamp(dot(p-a, b-a)/(length(b-a)*length(b-a)), 0.0, 1.0);\n // point along the line between the two points\n vec2 q = mix(a, b, h); \n //d = min(length(p-a),length(p-b)); // this actually doesn't matter anymore\n d = length(p-q); // debug, show point in between...\n return d;\n}\n\n// axis aligned box with center offset\nfloat dBox(vec2 p, vec2 c, float h, float w){\n vec2 s = (p-c); // shift the center\n \n float d = length(max(abs(s) - vec2(h/2.0,w/2.0), 0.0));\n \n return d;\n}\n\n\n// helper functio to draw the blank lines/their mask\nfloat blank(vec2 p) {\n // from, x:-0.85 to 0.85 (so 16 notes fit in nicely)\n // lines at y: -0.2, 0.1, 0.0, 0.1, 0.2\n // big lines at either end\n float mask = 0.0; \n float width = 0.85;\n float vert_space = 0.1;\n \n // five horizontal lines\n int i; \n for (i=-2; i<=2; i++){\n float height = vert_space*float(i);\n float l = sdLineSegment(p, vec2(width, height), vec2(-width, height));\n mask += smoothstep(LINE, 0.0, l);\n }\n \n // maybe use rectanle instead?\n float left = dBox(p, vec2(-width, 0.0), 0.01, vert_space*4.0);\n float right = dBox(p, vec2(width, 0.0), 0.01, vert_space*4.0);\n mask += smoothstep(LINE, 0.0, left);\n mask += smoothstep(LINE, 0.0, right);\n \n return mask; //step(0.5, mask); // step to make it more solid (really whacky)\n}\n\n\n// helper 
function to the top of the beamed notes\nfloat beams(vec2 p) {\n // should be double beamed 1/16th?\n // from like -0.8 to -0.5; -0.4 to 0.0; ...?\n float mask;\n float height = 0.4;\n int i;\n for (i=0; i<4; i++){\n float start = -0.71 + float(i)*0.4;\n float l1 = dBox(p, vec2(start + 0.15, height + 0.05), 0.3, 0.03); \n float l2 = dBox(p, vec2(start + 0.15, height), 0.3, 0.015); \n mask += smoothstep(LINE, 0.0, l1);\n mask += smoothstep(LINE, 0.0, l2);\n } \n \n return mask; \n}\n\n// draw notes (masks)\nfloat note(vec2 p, int time, uint type) {\n // type: 0 - kick, 1 - snare, 2 - hi hat\n // kick between 1 and 2 (height -0.3)\n // snare is between 3 and 4 (height 0.1)\n // hi hat is a ghost note (X) above 5 (height 0.5)\n // time is 0..16 at 0.1 intervals \n float mask;\n float size = 0.04;\n float x = -0.75 + float(time)*0.1;\n float y = -0.15 + float(type)*0.2; // height\n \n \n float dist_note;\n float stem_bot = y;\n if (type > 1u){\n // hh ghostnote!\n float l1 = sdLineSegment(p, vec2(x+size,y+size), vec2(x-size,y-size)); \n float l2 = sdLineSegment(p, vec2(x+size,y-size), vec2(x-size,y+size)); \n dist_note = min(l1, l2);\n stem_bot += size;\n } \n else {\n // TODO: little twist!\n dist_note = length(p-vec2(x,y)) - size;\n }\n float stem_dist = sdLineSegment(p, vec2(x+size, stem_bot), vec2(x+size, 0.45));\n \n mask += smoothstep(LINE, 0.0, min(dist_note, stem_dist));\n //mask += smoothstep(LINE, 0.0, stem_dist);\n return mask;\n \n}\n\n// mask again...\nfloat notes(vec2 p, uint beat){\n int i;\n float mask;\n for (i=0; i<16; i++){\n uint type = beat & 3u; // select lowest two bits\n type = min(type, 2u); // this shouldn't exist anyway.\n beat = beat >> 2; // shift to bits out for next step\n float note_mask = note(p, i, type);\n mask += note_mask;\n }\n return mask;\n}\n\n\n// vizualize the BPM?\nfloat indicator(vec2 p){\n float mask;\n float beat_time = mod(iTime, (60.0*4.0/BPM))/(60.0*4.0/BPM); // for test\n \n vec2 pos = vec2(-0.75 + 
beat_time*(2.0*0.75), -0.5);\n \n float d = length(p-pos);\n mask = smoothstep(0.1, 0.0, d);\n return mask;\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n uint beat = beat_hash();\n vec2 uv = fragCoord/iResolution.xy; \n uv -= vec2(0.5);\n uv *= 2.0;\n \n float line_dist = sdLineSegment(uv, vec2(0.85, 0.3), vec2(-0.85, 0.3));\n\n //TODO: more interesting background texture, maybe paper\n vec3 col = vec3(0.95, 0.98, 0.90);\n \n float bg_lines = blank(uv);\n float tops = beams(uv);\n float notes_mask = notes(uv, beat);\n float indicator_mask = indicator(uv);\n col = mix(col, vec3(0.01), bg_lines);\n col = mix(col, vec3(0.02), tops);\n col = mix(col, vec3(0.03), notes_mask); \n col = mix(col, vec3(0.85, 0.23, 0.02), indicator_mask);\n fragColor = vec4(col,1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [],
"outputs": [],
"code": "# define LIFE 6\n// linetime of 2^n seconds per beat. Meant to be 6, but 0 or 1 can be used for testing\n\n# define BPM 65.0\n// change, recompile, play and then rewind!\n\n\n// TODO: does this hash every repeat? I am not sure\n// via https://www.shadertoy.com/view/XlGcRh\n// https://www.pcg-random.org/\nuint pcg(uint v)\n{\n\tuint state = v * 747796405u + 2891336453u;\n\tuint word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n\treturn (word >> 22u) ^ word;\n}\n\n//TODO this is not at all accurate, but good enough for the demo\nuint unixTime(){\n // deconstruct iDate back into a 32 bit unix timestamp:\n // iDate.x = years\n // iDate.y = months\n // iDate.z = days\n // iDate.w = seconds (including fractionals)\n // TODO month/day is off by one?\n uint time;\n \n uvec4 date = uvec4(iDate);\n date.x -= 1970u; // offset since Unix epoch!\n \n // lazy addition for now to get a value.\n // TODO: improve values.\n time += uint(365.24 * float(date.x )) * 24u * 60u * 60u;\n time += 30u*date.y * 24u * 60u * 60u;\n time += date.z * 24u * 60u * 60u;\n time += date.w;\n \n // On the Image pass we subtract the current runtime to get the time during compilation\n // this is meant to match the Sound pass compilation varaint in the future.\n //time -= uint(iTime); // can be out of sync -.-\n return time;\n\n}\n\n// 32 bit value interpreted at trinary by reading 2 bits with max(2)\n// this should hash\nuint beat_hash(){\n uint beat;\n \n // placeholder\n beat = unixTime();\n // to cause variations about every ~1 minute, we ignore the lowest 6 bits\n // our target space is 3^16 which needs less than 26 bits.\n uint seed = beat >> LIFE; // 6 bits to only change every 64 seconds, reduced for testing\n uint max_val = 43046720u; //3^16 -1\n seed = pcg(seed);\n //seed = seed%max_val; // so it rolls over?\n \n \n \n // convert to (binary-encoded) trineary representation: \n int i;\n uint val = seed; // this is too imprecise? 
we lose precision\n for (i=0; i<16; i++){\n uint tri = val%3u;\n val = val/3u; // move to quotient for the next part\n // fill two bits at a time\n beat = beat << 2u;\n beat += uint(tri);//uint(tri); \n } \n return beat;\n}\n",
"name": "Common",
"description": "",
"type": "common"
},
{
"inputs": [],
"outputs": [],
"code": "# define TAU 6.2831\n\n/* \n* I welcome any improvements to the instruments:\n* the envelope is awfully cutoff\n* better drum machines exist on Shadertoy alreay\n* there is a progressively growing undertone, maybe it can be corrected\n*/\n\n// sorta useful as white noise for now\nfloat noise(float t){\n // really bad idea but will do for now...\n uint u = floatBitsToUint(t);\n u = pcg(u); \n float n = uintBitsToFloat(u);\n n = clamp(n, -1.0, 1.0);\n return n;\n}\n\n// TODO easier attach/pluck\nfloat envelope_drum(float t, float target_t) {\n // this is a bad idea to cutoff the attack!!\n if (t < target_t + 0.025) return 0.0;\n \n // shifts the wave to the target place\n float t2 = t-target_t+0.05;\n // avoid the hard cuttoffs!\n return max(0.0, 5.0*t2*exp(-50.0*(t2-0.095)));\n}\n\n// TODO: better instruments\nfloat kick(float t, float beat_time, float hit_time) {\n float env = envelope_drum(beat_time, hit_time);\n env = smoothstep(0.05, 0.95, env);\n float freq = 1.0-sinh((beat_time-hit_time)*TAU/2.0)*66.0;\n float tone = sin(TAU*freq);\n return 1.5*tone*env;\n}\n\nfloat snare(float t, float beat_time, float hit_time) {\n float env = envelope_drum(beat_time, hit_time);\n env = smoothstep(0.01, 0.9, env); // cut this off more\n // TODO: follow the concept: https://youtu.be/hULEn2_4Unw\n float freq = 1.0-tan((beat_time-hit_time)*TAU/2.0)*221.0;\n float tone = sin(TAU*freq);\n float noise = noise(t);\n return (0.02*noise+tone)*env;\n}\n\nfloat hihat(float t, float beat_time, float hit_time) {\n // TODO: better hi hat hit \n float env = envelope_drum(beat_time, hit_time);\n env = tan(env);\n // fake gate\n float gate = smoothstep(0.20,0.80, env);\n float noise = noise(beat_time+t);\n noise = clamp(noise, 0.0, 1.0);\n \n float tone = sin(TAU * 3420.0 * t);\n tone *= sin(tone*8.20);\n \n \n float signal = (0.5*tone+noise)*gate*env;\n \n return signal;\n}\n\n// returns the instrument[0,1,2] type per measure\nint player(int measure, uint beat){\n // read exactly 
the two bits in the 32-bit uint for this measure\n uint type = (beat >> 2*measure) & 3u;\n return int(type);\n}\n\n\nvec2 mainSound( int samp, float time )\n{\n // there probably needs to be an additional time so the cutoff isn't this random!\n float beat_time = mod(time, (60.0*4.0/BPM))/(60.0*4.0/BPM); \n // can we avoid the mod and instead do a gate/trigger?\n \n uint beat = beat_hash();\n \n float signal;\n \n int i;\n for (i=0; i<16; i++){\n int type = player(i, beat);\n float hit_time = float(i)/16.0; \n float sound;\n if (type == 0) {\n sound = kick(time, beat_time, hit_time);\n }\n else if (type == 1) {\n sound = snare(time, beat_time, hit_time);\n }\n else if (type == 2) {\n sound = hihat(time, beat_time, hit_time);\n }\n // TODO: proper mixing\n signal += sound; \n }\n \n // lots of fun with streo left over here.\n return vec2(signal);\n}",
"name": "Sound",
"description": "",
"type": "sound"
}
]
},
{
"ver": "0.1",
"info": {
"id": "3f33zl",
"date": "1747344200",
"viewed": 84,
"name": "casting tests",
"username": "jakel101",
"description": "testing how color values get cast into 8unorm",
"likes": 0,
"published": 3,
"flags": 0,
"usePreview": 0,
"tags": [
"test"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "void mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy;\n \n if (uv.y > 0.5) (uv.x-=0.5);\n \n \n vec3 col;\n \n col.r = uv.x;\n col.g = (float(int(uv.x*255.0+0.5)))/255.0; \n col.b = (float(trunc(uv.x*255.0+0.5)))/255.0; \n \n fragColor = vec4(col,1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
}
]
},
{
"ver": "0.1",
"info": {
"id": "w32XD1",
"date": "1746067136",
"viewed": 77,
"name": "displacing tiling in 2D??",
"username": "jakel101",
"description": "WIP where you distort the tile UV (and later ID) so they are no longer squares",
"likes": 0,
"published": 3,
"flags": 0,
"usePreview": 0,
"tags": [
"tiling"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "#define PI 3.14192\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy;\n uv *= 3.0;\n uv.x += asin(fract(uv.y-0.5)+0.5)*(1.0/PI);\n \n // TODO: uv per shape, not per square.\n // +vec2(0.0, smoothstep(0.0, 1.0, fract(uv.x))))\n vec2 cellUV = fract(uv);\n vec2 cellID = floor(uv);\n \n // checkerboard trick\n float check = clamp(0.0, 1.0, \n (mod(cellID.y, 2.0) + mod(cellID.x, 2.0))) \n - ((mod(cellID.y, 2.0) * mod(cellID.x, 2.0)));\n \n vec3 col = vec3(cellUV, 0.0);//, check);\n fragColor = vec4(col,1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
}
]
},
{
"ver": "0.1",
"info": {
"id": "tXlGR7",
"date": "1742863063",
"viewed": 111,
"name": "buffer self reference test",
"username": "jakel101",
"description": "little illustration on how multiple buffer pass interact with one another and their render order. used for unit testing",
"likes": 1,
"published": 3,
"flags": 32,
"usePreview": 0,
"tags": [
"test",
"multipass"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 258,
"src": "/media/previz/buffer01.png",
"ctype": "buffer",
"channel": 1,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 not patents [*|*]\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy;\n vec4 c0 = texture(iChannel0, uv);\n vec4 c1 = texture(iChannel1, uv);\n fragColor = vec4(mix(c0.rgb, c1.rgb, step(0.5, uv.x)), 1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
},
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 258,
"src": "/media/previz/buffer01.png",
"ctype": "buffer",
"channel": 1,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 257,
"channel": 0
}
],
"code": "void mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy;\n vec4 c0 = texture(iChannel0, uv); //self\n c0 += fract(iTime + uv.y) - 0.5;\n vec4 c1 = texture(iChannel1, uv);\n c1 += fract(iTime - uv.y) - 0.5;\n fragColor = vec4(mix(c0.rgb, c1.rgb, step(0.25, uv.x)), 1.0);\n}",
"name": "Buffer A",
"description": "",
"type": "buffer"
},
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
},
{
"id": 258,
"src": "/media/previz/buffer01.png",
"ctype": "buffer",
"channel": 1,
"sampler": {
"filter": "linear",
"wrap": "clamp",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 258,
"channel": 0
}
],
"code": "void mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy;\n vec4 c0 = texture(iChannel0, uv); \n c0 += fract(iTime + uv.y) - 0.5;\n vec4 c1 = texture(iChannel1, uv); //self\n c1 += fract(iTime - uv.y) - 0.5;\n fragColor = vec4(mix(c0.rgb, c1.rgb, step(0.75, uv.x)), 1.0);\n}",
"name": "Buffer B",
"description": "",
"type": "buffer"
}
]
},
{
"ver": "0.1",
"info": {
"id": "M3VBWt",
"date": "1738883384",
"viewed": 253,
"name": "pathtraced heightmap (WIP)",
"username": "jakel101",
"description": "goal is to display some texture as a 3D heighmap. like an array of columns maybe? for development progress see: [url=https://github.com/Vipitis/shader_tracker/blob/main/jakel101/M3VBWt_pathtraced_heightmap__WIP_/Image.frag]GitHub[/url]\n",
"likes": 5,
"published": 3,
"flags": 0,
"usePreview": 0,
"tags": [
"25d",
"heightmap",
"pathracing"
],
"hasliked": 0,
"retrieved": "2025-09-09T21:55:41.107497+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 16,
"src": "/media/a/3083c722c0c738cad0f468383167a0d246f91af2bfa373e9c5c094fb8c8413e0.png",
"ctype": "texture",
"channel": 0,
"sampler": {
"filter": "mipmap",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents \\_%_/\n\n/* Image pass shader to draw a texture/buffer/input as a heightmap\n* with some pathtracing as pillars of pixels.\n* Meant to be used in multiple projects and therefore\n* easily configurable at the top with a few macros\n*\n* selflink: https://www.shadertoy.com/view/M3VBWt\n* other projects using this shader/framework: https://www.shadertoy.com/playlist/mX2cD3\n* \n* work in progress:\n* todo(ideas):\n* - monte carlo light simulation\n* - pysics simulation\n* - ball/area lights\n* - infinite/LOD tiles?\n* - DDA like traversal\n* - cleanup as usual\n* feedback/improvements welcome here.\n*/\n\n\n# define PI 3.141592653\n# define HEIGHT_SCALE 0.5\n\n# define CELLS ivec2(iChannelResolution[0].x, iChannelResolution[0].y)\n//# define CELLS ivec2(3)\n\n// unsure yet where to bring this!\n# define SUN normalize(vec3(sin(iDate.w*0.5), cos(iTime), HEIGHT_SCALE*1.5))\n// normalize(vec3(3.0, -5.0, 2.0))\n\n// horizontal FOV, if you use negative values the camera will be orthographic!\n// examples:\n// FOV -1.0 for orthographic (sensor size)\n// FOV 90.0 for perspective wide\n// FOV 45.0 for perspective narower\n# define FOV 90.0\n\n// how far \"behind\" the camera is behind the arcball\n# define CAMERA_DIST -0.65\n\n# define BALL_SIZE 0.25\n\n// TODO one variable to change between sampled and direct light\n// 0 -> directional light\n// 1 -> point light\n// 2 -> MIS? (one light, one sampled?)\n// 3+ -> bounces//samples?\n# define BOUNCES 4\n# define SAMPLES 8\n\nstruct Material{\n vec3 col; // ground color (or texture?)\n float emissivity; //emitted light in some unit?\n float roughness; // invers reflectivity, sorta\n float translucency; // something like 1.0 for glass and 0.0 for solids? 
-> rays split/sample/refract??\n float IOR; // index of refraction\n};\n\n// edit these here to change the look and feel!\nMaterial chalk = Material(vec3(1.0), 0.0, 0.65, 0.0, 1.3);\nMaterial ground = Material(vec3(0.5), 0.0, 0.25, 0.0, 0.0);\nMaterial sky = Material(vec3(0.02, 0.3, 0.85), 1.0, 0.90, 0.0, 0.0);\nMaterial glass = Material(vec3(1.0), 0.0, 0.02, 0.9, 1.5);\n\n\nivec2 worldToCell(vec3 p) {\n // move world space again\n p += 1.0;\n p *= 0.5;\n ivec2 st = ivec2((p.xy*vec2(CELLS.xy)));\n // TODO: find an actual solution to the edge cases!\n st = min(st, CELLS -1);\n return st;\n}\n\nstruct Ray{\n vec3 origin;\n vec3 dir;\n vec3 inv_dir; // for speedup?\n};\n\n// helper constructor\nRay newRay(vec3 ro, vec3 rd){\n return Ray(ro, rd, 1.0/rd);\n}\n\n\nstruct IntersectionInfo{\n bool hit;\n // rest illdefined for a miss\n bool inside;\n vec3 entry;\n vec3 exit;\n vec3 entry_norm;\n vec3 exit_norm;\n float entry_dist;\n float exit_dist;\n};\n\n// sorta reference: https://tavianator.com/2022/ray_box_boundary.html\nIntersectionInfo AABB(vec3 center, vec3 size, Ray ray){\n IntersectionInfo res;\n\n vec3 pos = center + size;\n vec3 neg = center - size;\n\n vec3 pos_dist = (pos-ray.origin) * ray.inv_dir;\n vec3 neg_dist = (neg-ray.origin) * ray.inv_dir;\n\n vec3 min_dist = min(pos_dist, neg_dist);\n vec3 max_dist = max(pos_dist, neg_dist);\n\n res.entry_dist = max(max(min_dist.x, min_dist.y), min_dist.z);\n res.exit_dist = min(min(max_dist.x, max_dist.y), max_dist.z);\n\n // normals point away from the center\n res.entry_norm = -sign(ray.dir) * vec3(greaterThanEqual(min_dist, vec3(res.entry_dist)));\n res.exit_norm = sign(ray.dir) * vec3(lessThanEqual(max_dist, vec3(res.exit_dist)));\n\n // essentially methods?\n res.entry = ray.origin + ray.dir*res.entry_dist;\n res.exit = ray.origin + ray.dir*res.exit_dist;\n\n res.hit = res.entry_dist < res.exit_dist && res.exit_dist > 0.0;\n res.inside = res.entry_dist < 0.0; // entry behind us\n\n return res;\n}\n\n// with 
help from: https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-sphere-intersection.html\nIntersectionInfo Sphere(vec3 center, float radius, Ray ray){\n IntersectionInfo res;\n vec3 local = ray.origin - center;\n \n float a = dot(ray.dir, ray.dir);\n float b = 2.0* dot(ray.dir, local);\n float c = dot(local, local) - pow(radius,2.0);\n \n float discriminant = pow(b,2.0) - 4.0*a*c;\n \n res.hit = discriminant >= 0.0;\n \n float t0 = (-b + sqrt(discriminant))/ (2.0*a);\n float t1 = (-b - sqrt(discriminant))/ (2.0*a);\n\n res.entry_dist = min(t0, t1);\n res.exit_dist = max(t0, t1);\n \n if (res.entry_dist < 0.0 && res.exit_dist < 0.0){\n res.hit = false;\n }\n\n res.entry = ray.origin + ray.dir * res.entry_dist;\n res.exit = ray.origin + ray.dir * res.exit_dist;\n\n res.entry_norm = normalize(res.entry - center);\n res.exit_norm = normalize(res.exit - center);\n \n res.inside = res.entry_dist < 0.0 && res.exit_dist > 0.0; // entry behind us\n\n return res;\n}\n\n\nIntersectionInfo pillar_hits(ivec2 cell, float height, Ray ray){\n // let's move the pillar into world space by having it's center + extends\n\n vec3 extend = vec3(1.0/vec2(CELLS), abs(height)*0.5);\n vec3 p = vec3(cell.xy, abs(height)*0.5);\n p.xy *= extend.xy;\n p.xy *= 2.0;\n p.xy -= 1.0 - extend.xy; // not quite the offset?\n //extend.z = extend.y; // make them cubes?\n\n // for the case of clouds the box is at the top?\n if (height < 0.0){\n p.z = HEIGHT_SCALE*(1.0-abs(height*0.5));\n }\n\n // TODO: redo this math when less asleep...\n IntersectionInfo res = AABB(p, extend, ray);\n return res;\n}\n\n\nvec4 sampleHeight(ivec2 cell){\n // to allow for more complex math to determine height\n // .rgb should just return the texture color or some modification of it\n //cell.x = (cell.x + iFrame) % int(iChannelResolution[0].x); // fun texture scroll\n vec4 tex = texelFetch(iChannel0, cell, 0);\n vec4 res;\n res.a = tex.r + tex.g + tex.b; // we do height by a 
sum of the color for now\n res.a *= 0.33;\n res.rgb = tex.rgb; // simply copy the color as the \"texture\" for now\n \n // res.a = tex.a; // debug/use existing height data.\n res.a *= HEIGHT_SCALE;\n return res;\n}\n\n// from: https://www.shadertoy.com/view/7l3yRn\nvec2 get_random_numbers(inout uvec2 seed) {\n // This is PCG2D: https://jcgt.org/published/0009/03/02/\n seed = 1664525u * seed + 1013904223u;\n seed.x += 1664525u * seed.y;\n seed.y += 1664525u * seed.x;\n seed ^= (seed >> 16u);\n seed.x += 1664525u * seed.y;\n seed.y += 1664525u * seed.x;\n seed ^= (seed >> 16u);\n // Convert to float. The constant here is 2^-32.\n return vec2(seed) * 2.32830643654e-10;\n}\n\n// also from above\n// TODO collaplse into one function!\n// Given uniform random numbers u_0, u_1 in [0,1)^2, this function returns a\n// uniformly distributed point on the unit sphere (i.e. a random direction)\n// (omega)\nvec3 sample_sphere(vec2 random_numbers) {\n float z = 2.0 * random_numbers[1] - 1.0;\n float phi = 2.0 * PI * random_numbers[0];\n float x = cos(phi) * sqrt(1.0 - z * z);\n float y = sin(phi) * sqrt(1.0 - z * z);\n return vec3(x, y, z);\n}\n\n\n// Like sample_sphere() but only samples the hemisphere where the dot product\n// with the given normal (n) is >= 0\nvec3 sample_hemisphere(vec2 random_numbers, vec3 normal) {\n vec3 direction = sample_sphere(random_numbers);\n if (dot(normal, direction) < 0.0)\n direction -= 2.0 * dot(normal, direction) * normal;\n return direction;\n}\n\n\nstruct RaycastInfo{\n bool hit; // if negative, the rest is undefined.\n float dist; // hit_info.entry_dist redundant?\n //ivec2 cell; //current_cell?\n IntersectionInfo hit_info; //has the entry norm etc.\n vec3 col; // TODO: replace with material\n //Ray ray; //just as a reference?\n};\n\n\nRaycastInfo raycast(Ray ray){\n // cast the ray untill there is a hit or we exit the box\n // \"any hit\" shader?\n RaycastInfo result;\n \n IntersectionInfo box = AABB(vec3(0.0, 0.0, HEIGHT_SCALE*0.5), 
vec3(1.0, 1.0, HEIGHT_SCALE*0.5), ray);\n\n vec3 entry = box.entry;\n\n if (!box.hit){\n // if we \"MISS\" the whole box (not inside?).\n result.hit = false;\n return result;\n \n }\n // everything below here is inside the box\n if (box.inside){\n // if we are \"inside\" the entry should just be ro!\n entry = ray.origin; // maybe problems with distance caluclations at the end?\n }\n \n ivec2 current_cell = worldToCell(entry); // TODO: this one is problematic!\n int i;\n ivec2 max_cells = CELLS - min(current_cell, CELLS-current_cell);\n int max_depth = (max_cells.x + max_cells.y)+2; // could also be min!\n for (i = 0; i < max_depth; i++){\n if (current_cell.x < 0 || current_cell.x >= CELLS.x ||\n current_cell.y < 0 || current_cell.y >= CELLS.y){\n // we marched far enough are are \"outside the box\" now!\n result.hit = false; \n return result;\n }\n\n vec4 tex = sampleHeight(current_cell);\n IntersectionInfo pillar = pillar_hits(current_cell, tex.a, ray);\n\n if (pillar.hit) {\n // \"any hit\" (side/top/bot) -> loop ends here\n // do a little bit of light sim by doing diffuse \"block of chalk\"\n vec3 col = tex.rgb;\n // TODO materail decision here?\n result.hit = true;\n result.hit_info = pillar;\n result.dist = pillar.entry_dist;\n result.col = col;\n return result; \n }\n\n // check if our exit distance larger than the box, means we should be at the final pillar...\n if (pillar.exit_dist >= box.exit_dist){\n result.hit = false;\n return result; // do we ever get here?\n }\n\n // the step\n // TODO: DDA style decision\n ivec2 next_cell = current_cell + ivec2(pillar.exit_norm.xy);\n if (next_cell == current_cell){\n // in this case we do another raycast - but without any Z component\n // so the vector is sideways and points to a new cell!\n vec3 flat_rd = vec3(ray.dir.xy, 0.0);\n Ray flat_ray = Ray(ray.origin, flat_rd, 1.0/flat_rd);\n\n IntersectionInfo grid = pillar_hits(current_cell, 1.0, flat_ray);\n next_cell += ivec2(grid.exit_norm.xy); // TODO check if this 
norm is correct!\n }\n // for next iteration\n current_cell = next_cell;\n }\n \n result.hit = false;\n return result;\n}\n\n// more like a bad shadowmap\n// idea for the future: precompute the horizon per pixel: https://youtu.be/LluCbGdi-RM\nfloat directional_light(Ray sun_ray, vec3 normal){\n // return the amount of shadowed?\n // we are now marching upwards from some hit\n // ro is essentially the point we started from\n // rd is the sun angle\n RaycastInfo res = raycast(sun_ray);\n //return res.a;\n \n //TODO: intensity/color?\n \n float amt = 1.0;\n \n \n if (!res.hit){// || (ro + rd*res.a).z >= HEIGHT_SCALE){\n // miss means full sunlight!\n amt = max(0.0, dot(sun_ray.dir, normal));\n }\n else {\n // TODO: use distance?\n amt = 0.1; // additional ambient light from here?\n }\n return amt;\n}\n\n// struct for lights? colored light?\nfloat point_light(vec3 start, vec3 light_pos, float light_intensity, vec3 normal){\n float amount;\n \n vec3 light_dir = normalize(light_pos - start);\n float light_dist = distance(start, light_pos);\n // Ray(hit+0.001*SUN, SUN, 1.0/SUN);\n Ray light_cast = Ray(start + 0.001*light_dir, light_dir, 1.0/light_dir);\n RaycastInfo res = raycast(light_cast);\n \n if (!res.hit || res.dist > light_dist) {\n // either we miss geometry or we hit gometry behind the light\n amount = inversesqrt(light_dist)* light_intensity;\n amount *= max(0.0, dot(normal, light_dir));\n }\n else {\n // hit an intersection before the light, so don't see the light!\n amount = 0.0; \n }\n \n // TODO still needs dot normal!\n return amount;\n}\n\n\n\n// copied from https://www.shadertoy.com/view/M3jGzh\nfloat checkerboard(vec2 check_uv, float cells){\n check_uv *= cells/2.0;\n float rows = float(mod(check_uv.y, 1.0) <= 0.5);\n float cols = float(mod(check_uv.x, 1.0) <= 0.5);\n return float(rows == cols);\n}\n\nstruct HitInfo{\n Material mat;\n float dist;\n vec3 norm;\n vec3 pos;\n bool inside; // for doing glass rays!\n};\n\n\nHitInfo sampleGround(vec3 ro, vec3 
rd){\n HitInfo res;\n // TODO: rename to sample skybox maybe? as the ground is sorta part of that...\n float ground_height = 0.0;\n float ground_dist = (ground_height-ro.z)/rd.z;\n // TODO: use the actual sphere for the \"skybox\"\n if (ground_dist < 0.0 ||ground_dist > 10.0) {\n // essentially sky hit instead?\n // just some random skybox right now... could be improved of course!\n vec3 col = vec3(0.23, 0.59, 0.92)*exp(dot(SUN, rd)-0.8);\n col = clamp(col, vec3(0.0), vec3(1.0));\n \n res.mat = sky;\n \n res.mat.col = col; // no longer matches with \"sky\" - so gotta change the above maybe?\n \n res.dist = 10.0;\n res.pos = ro + rd*res.dist;\n res.mat.emissivity *= clamp(smoothstep(res.dist - 8.1, res.dist- 3.0, res.pos.z), 0.0, 1.0);\n res.norm = -rd;\n return res; // some random distance that is positive!\n }\n\n vec3 ground_hit = ro + (rd * ground_dist);\n\n float val = checkerboard(ground_hit.xy, 8.0)* 0.25;\n val += 0.45;\n //val *= 2.0 - length(abs(ground_hit));\n\n // fake sun angle spotlight... TODO actual angle and normal calculation!\n //val *= 2.5 - min(2.3, length((-SUN-ground_hit)));//,vec3(0.0,0.0,1.0));\n\n vec3 col = vec3(val);\n res.mat = ground;\n res.mat.col = col;\n res.dist = ground_dist;\n res.pos = ground_hit;\n res.norm = vec3(0.0, 0.0, 1.0);\n return res;\n}\n\n// TODO for montecarlo we need an external loop around this!\nHitInfo scene(Ray camera){\n HitInfo res;\n \n // terrain\n RaycastInfo terrain = raycast(camera);\n\n // ball\n IntersectionInfo ball = Sphere(SUN, BALL_SIZE, camera);\n\n // five cases: just terrain hit, ball hit, both miss, both hit terrain closer, both hit ball closer\n // idea: get all hits, then calculate closest (sorted?) and then return that. 
if none return background\n // TODO: redo logic (dynamic arrays?)\n\n if (terrain.hit && (!ball.hit || terrain.dist < ball.entry_dist)) {\n // terrain infront of the ball\n res.mat = chalk;\n res.mat.col = terrain.col; // TODO: material construction\n res.norm = terrain.hit_info.entry_norm;\n res.pos = terrain.hit_info.entry;\n res.inside = terrain.hit_info.inside;\n if (res.inside) {\n res.norm = terrain.hit_info.exit_norm;\n res.pos = terrain.hit_info.exit;\n }\n \n } else if (ball.hit) {\n // ball infront of the terrain\n res.mat = glass; // TODO: glass material?\n res.norm = ball.entry_norm;\n res.pos = ball.entry; \n res.inside = ball.inside;\n if (res.inside) {\n res.norm = ball.exit_norm;\n res.pos = ball.exit;\n }\n \n } else {\n res = sampleGround(camera.origin, camera.dir);\n }\n \n\n return res;\n}\n\n// follow ? https://www.shadertoy.com/view/7l3yRn\nstruct RayRadiance{\n vec3 radiance;\n vec3 throughput_weight;\n};\n\n// reading: https://www.pbr-book.org/4ed/Radiometry,_Spectra,_and_Color/Surface_Reflection\n// further: https://www.pbr-book.org/4ed/Reflection_Models\n// watching maybe: https://youtu.be/wA1KVZ1eOuA\nvec3 brsf(in vec3 rd, in HitInfo hit, inout vec3 next_dir, inout uvec2 seed){\n // returns the outgoing radiance?\n // as well as the next ray direction. (inout)\n \n Material mat = hit.mat;\n vec3 norm = hit.norm;\n // naive reflection model\n vec3 perfect_reflection = reflect(rd, norm);\n next_dir = mix(perfect_reflection, next_dir, mat.roughness);\n \n //native transmission model\n\n vec2 randoms = get_random_numbers(seed);\n if (randoms.x < mat.translucency) {\n float IOR = hit.inside ? mat.IOR : 1.0/mat.IOR;\n vec3 reflect_norm = hit.inside ? 
norm : -norm;\n vec3 perfect_refraction = refract(rd, -reflect_norm, IOR);\n next_dir = mix(perfect_refraction, -next_dir, mat.roughness);\n //next_dir = perfect_refraction;\n norm = reflect_norm;\n }\n\n vec3 outgoing = mat.col * 2.0 * max(0.0, dot(norm, next_dir));\n return outgoing;\n}\n\n\n\n// factored out to function so the seed changes correctly due to inout -.-\nvec3 get_ray_radiance(HitInfo first_hit, Ray camera, inout uvec2 seed){\n //after get_ray_radiance in https://www.shadertoy.com/view/7l3yRn\n\n // because the first_hit is shared between samples, we can skip the traversal a few times\n vec3 radiance = vec3(first_hit.mat.emissivity);\n vec3 next_dir = sample_hemisphere(get_random_numbers(seed), first_hit.norm);\n vec3 outgoing_radiance = brsf(camera.dir, first_hit, next_dir, seed);\n vec3 throughput_weight = outgoing_radiance;\n \n Ray bounce = newRay(first_hit.pos+0.0001*next_dir, next_dir);\n\n int i;\n for(i=0; i<BOUNCES; i++){\n HitInfo hit = scene(bounce);\n radiance += throughput_weight * hit.mat.emissivity;\n // TODO: technically we could exit here early?\n // random first and then mutated below\n next_dir = sample_hemisphere(get_random_numbers(seed), hit.norm); \n outgoing_radiance = brsf(bounce.dir, hit, next_dir, seed);\n \n throughput_weight *= outgoing_radiance;\n if (lessThanEqual(throughput_weight,vec3(0.0))==bvec3(true)) break; // TODO benchmark if this even works as a speedup\n //if (length(throughput_weight) <= 0.0) break; // version which I don't belive in...\n bounce = newRay(hit.pos+0.0001*next_dir, next_dir);\n }\n\n return radiance;\n}\n\n\n\n// TODO: calucalte the light from that brdf too? HitInfo2 -> RayRadiance, next_dir\n// multiple importance sampling? following: https://lisyarus.github.io/blog/posts/multiple-importance-sampling.html\n// idea being we sample the direct light or direction light once, and then do one random sample. weight them 50/50?\n// TODO: call scene below and loop it?\n// 1. cast scene, 2. 
accumulate light, 3. get next dir, LOOP\n// add a MAX_bounces or SPP var at the top.\n\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // uv normalized to [-1..1] for height with more width\n vec2 uv = (2.0*fragCoord - iResolution.xy)/iResolution.y;\n vec2 mo = (2.0*iMouse.xy - iResolution.xy)/iResolution.y;\n \n \n uvec2 seed = uvec2(fragCoord) ^ uvec2(iFrame << 16);\n\n //fragColor = texture(iChannel0, uv);\n //return;\n\n // for when it's just idling...\n float azimuth = -1.9+iTime*0.1 + mo.x; // keeps a bit of residue of the mouse!\n float altitude = 0.7+cos(1.5+iTime*0.25)*0.35;\n if (sign(iMouse.z) > 0.0){\n // orbiting camera setup\n azimuth = PI*mo.x;\n altitude = 0.5*PI*clamp(mo.y+1.0, -0.01, 0.99); // maybe just positive?\n }\n\n // make sure you don't look \"below\"\n altitude = clamp(altitude, HEIGHT_SCALE*0.2, PI);\n\n // a unit length orbit!\n vec3 camera_pos = vec3(\n cos(azimuth)*cos(altitude),\n sin(azimuth)*cos(altitude),\n sin(altitude));\n // the camera is always looking \"at\" the origin or half way above it\n vec3 look_dir = normalize(vec3(0.0, 0.0, HEIGHT_SCALE*0.5) - camera_pos);\n\n\n // TODO moving the camera in and out over time??\n camera_pos += look_dir * CAMERA_DIST; // moving the camera \"back\" to avoid occlusions?\n // two vectors orthogonal to this camera direction (tagents?)\n //vec3 look_u = camera_pos + vec3(-sin(azimuth), cos(azimuth), 0.0);\n //vec3 look_v = camera_pos + vec3(sin(altitude)*-cos(azimuth), sin(altitude)*-sin(azimuth), cos(altitude));\n\n\n // turns out analytically these aren't correct. 
so using cross instead -.-\n vec3 look_u = normalize(cross(look_dir, vec3(0.0, 0.0, 1.0)));\n vec3 look_v = normalize(cross(look_u, look_dir)); // is this faster?\n // camera plane(origin of each pixel) -> barycentric?\n\n vec3 camera_plane;\n vec3 ray_dir;\n vec3 ray_origin;\n\n if (FOV > 0.0){\n // assume a pinhole camera.\n // FOV is the horizontal fov, the given focal length becomes:\n // the 1.0 is the sensor height.\n float focal_length = 1.0/tan(radians(FOV*0.5));\n\n // the ro\n camera_plane = camera_pos - (look_dir*focal_length) + ((look_u*uv.x) + (look_v*uv.y))*-1.0; // inverted here to see upright\n ray_origin = camera_pos;\n\n // the rd\n ray_dir = camera_pos-camera_plane;\n ray_dir = normalize(ray_dir);\n }\n\n else {\n // negative FOV values are interpreted as a sensor size for a orthographic camera!\n // horizontal sensor size, -1 would be something sensible... everything else is far away\n float sensor_size = FOV*0.5*-1.0;\n camera_plane = camera_pos + ((look_u*uv.x)+(look_v*uv.y))*sensor_size; // wider fov = larger \"sensor\"\n ray_dir = look_dir;\n ray_origin = camera_plane;\n }\n\n // todo extract to a function\n // Ray in -> material/normal out?\n // caluclate and aggregate light throughput?\n // new ray direction based on sampled material/refraction?\n\n Ray camera = newRay(ray_origin, ray_dir);\n vec3 out_col = vec3(0.0);\n \n // primary rays happen here once per frame!\n HitInfo first_hit = scene(camera);\n \n int j;\n for(j=0; j<SAMPLES; ++j){\n vec3 rad = get_ray_radiance(first_hit, camera, seed);\n out_col += rad;\n }\n // average color over all samples\n out_col /= float(SAMPLES);\n \n // TODO gamma correction?\n fragColor = vec4(out_col, 1.0);\n}",
"name": "Image",
"description": "",
"type": "image"
}
]
},
{
"ver": "0.1",
"info": {
"id": "X3KyRd",
"date": "1737245413",
"viewed": 92,
"name": "bounding box development",
"username": "jakel101",
"description": "trying to figure out how this would work (without looking it up)\n\nneeded for this unreleased one: https://www.shadertoy.com/view/lXycDz",
"likes": 1,
"published": 3,
"flags": 0,
"usePreview": 0,
"tags": [
"raytracing",
"cube"
],
"hasliked": 0,
"retrieved": "2025-08-20T22:35:16.484822+00:00"
},
"renderpass": [
{
"inputs": [],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patent (O)-(O)\n// working out: https://www.desmos.com/3d/dfrawfz5oy\n// improved: https://www.desmos.com/3d/loyr0cvm2c\n// done:? https://www.desmos.com/3d/bewjnaugsh\n# define PI 3.141592654\n# define FOV 90.0\n\nstruct Ray{\n vec3 origin;\n vec3 dir;\n vec3 inv_dir; // for speedup?\n};\n\nstruct BoxHit{\n bool hit;\n bool inside;\n vec3 entry;\n vec3 exit;\n vec3 entry_norm;\n vec3 exit_norm;\n float entry_dist;\n float exit_dist;\n};\n\n// sorta reference: https://tavianator.com/2022/ray_box_boundary.html\nBoxHit AABB(vec3 center, vec3 size, Ray ray){\n BoxHit res;\n \n vec3 pos = center + size;\n vec3 neg = center - size;\n \n vec3 pos_dist = (pos-ray.origin) * ray.inv_dir;\n vec3 neg_dist = (neg-ray.origin) * ray.inv_dir;\n \n vec3 min_dist = min(pos_dist, neg_dist);\n vec3 max_dist = max(pos_dist, neg_dist);\n \n res.entry_dist = max(max(min_dist.x, min_dist.y), min_dist.z);\n res.exit_dist = min(min(max_dist.x, max_dist.y), max_dist.z);\n \n // essentially methods?\n res.hit = res.entry_dist < res.exit_dist && res.exit_dist > 0.0;\n res.inside = res.entry_dist < 0.0; // entry behind us\n \n res.entry = ray.origin + ray.dir*res.entry_dist;\n res.exit = ray.origin + ray.dir*res.exit_dist;\n \n // normals point away from the center\n res.entry_norm = -sign(ray.dir) * vec3(greaterThanEqual(min_dist, vec3(res.entry_dist)));\n res.exit_norm = sign(ray.dir) * vec3(lessThanEqual(max_dist, vec3(res.exit_dist)));\n \n return res;\n}\n\n// with help from: https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-sphere-intersection.html\nBoxHit Sphere(vec3 center, float radius, Ray ray){\n BoxHit res; \n vec3 local = ray.origin - center;\n \n float a = dot(ray.dir, ray.dir);\n float b = 2.0* dot(ray.dir, local);\n float c = dot(local, local) - pow(radius,2.0);\n \n float discriminant = pow(b,2.0) - 4.0*a*c;\n \n res.hit = discriminant >= 0.0;\n \n float t0 = (-b + sqrt(discriminant))/ (2.0*a);\n 
float t1 = (-b - sqrt(discriminant))/ (2.0*a);\n\n res.entry_dist = min(t0, t1);\n res.exit_dist = max(t0, t1);\n\n res.entry = ray.origin + ray.dir * res.entry_dist;\n res.exit = ray.origin + ray.dir * res.exit_dist;\n\n res.entry_norm = normalize(res.entry - center);\n res.exit_norm = normalize(res.exit - center);\n \n res.inside = res.entry_dist < 0.0; // entry behind us\n\n return res;\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n // uv normalized to [-1..1] for height with more width\n vec2 uv = (2.0*fragCoord - iResolution.xy)/iResolution.y;\n vec2 mo = (2.0*iMouse.xy - iResolution.xy)/iResolution.y;\n \n //fragColor = texture(iChannel0, uv);\n //return;\n \n // for when it's just idling... \n float azimuth = iTime*0.3 + mo.x; // keeps a bit of residue of the mouse!\n float altitude = cos(iTime*0.5)*0.35; \n if (sign(iMouse.z) > 0.0){\n // orbiting camera setup\n azimuth = PI*mo.x;\n altitude = 0.5*PI*clamp(mo.y, -0.85, 0.99); // maybe just positive?\n }\n \n // make sure you don't look \"below\"\n altitude = clamp(altitude, -PI, PI);\n \n // a unit length orbit!\n vec3 camera_pos = vec3(\n cos(azimuth)*cos(altitude),\n sin(azimuth)*cos(altitude),\n sin(altitude)); \n // the camera is always looking \"at\" the origin or half way above it\n vec3 look_dir = normalize(vec3(0.0, 0.0, 0.0) - camera_pos);\n \n \n // TODO moving the camera in and out over time??\n camera_pos += look_dir * -0.0; // moving the camera \"back\" to avoid occlusions?\n // two vectors orthogonal to this camera direction (tagents?) \n //vec3 look_u = camera_pos + vec3(-sin(azimuth), cos(azimuth), 0.0);\n //vec3 look_v = camera_pos + vec3(sin(altitude)*-cos(azimuth), sin(altitude)*-sin(azimuth), cos(altitude)); \n\n \n // turns out analytically these aren't correct. 
so using cross instead -.-\n vec3 look_u = normalize(cross(vec3(0.0, 0.0, -1.0), look_dir));\n vec3 look_v = normalize(cross(camera_pos, look_u)); // is this faster?\n // camera plane(origin of each pixel) -> barycentric?\n \n vec3 camera_plane;\n vec3 ray_dir;\n vec3 ray_origin;\n \n if (FOV > 0.0){\n // assume a pinhole camera.\n // FOV is the horizontal fov, the given focal length becomes:\n // the 1.0 is the sensor height.\n float focal_length = 1.0/tan(radians(FOV*0.5));\n \n // the ro\n camera_plane = camera_pos - (look_dir*focal_length) + ((look_u*uv.x) + (look_v*uv.y))*-1.0; // inverted here to see upright\n ray_origin = camera_pos;\n \n // the rd\n ray_dir = camera_pos-camera_plane;\n ray_dir = normalize(ray_dir); \n }\n \n else {\n // negative FOV values are interpreted as a sensor size for a orthographic camera!\n // horizontal sensor size, -1 would be something sensible... everything else is far away\n float sensor_size = FOV*0.5*-1.0;\n camera_plane = camera_pos + ((look_u*uv.x)+(look_v*uv.y))*sensor_size; // wider fov = larger \"sensor\"\n ray_dir = look_dir;\n ray_origin = camera_plane;\n }\n \n \n Ray camera = Ray(ray_origin, ray_dir, 1.0/ray_dir);\n BoxHit res = AABB(vec3(0.0, sin(iTime*0.2), 0.0), vec3(0.5), camera);\n \n //res = Sphere(vec3(0.0), 0.4, camera);\n BoxHit res2 = Sphere(vec3(0.0, 0.0, cos(iTime*0.4)), 0.4, camera);\n \n vec3 col = vec3(0.05);\n \n if (res.hit) {\n col = res.entry_norm + vec3(0.5);\n if (res.inside) {\n //col = vec3(0.5);\n col = res.exit_norm + vec3(0.5);\n }\n }\n else {\n //col = res.exit_norm + vec3(0.5);\n }\n \n if (res2.hit && ((res2.entry_dist > res.exit_dist) || !res.hit) ) {\n col = res2.entry_norm + vec3(0.5);\n \n \n }\n \n \n \n fragColor = vec4(col, 1.0);\n \n}",
"name": "Image",
"description": "",
"type": "image"
}
]
},
{
"ver": "0.1",
"info": {
"id": "lXyyzR",
"date": "1735848777",
"viewed": 140,
"name": "Persistent Music Visualizer",
"username": "jakel101",
"description": "to have some of the element be reactive over time, the persistence is done over time in a Buffer.\nChange the Music in Buffer A Channel1\nChange channel0 to be sampled nearest instead of linear\ndeveloped on 165 Hz, might look worse on 60!",
"likes": 4,
"published": 3,
"flags": 32,
"usePreview": 0,
"tags": [
"music",
"visualizer",
"infinity"
],
"hasliked": 0,
"retrieved": "2025-08-13T20:37:17.529453+00:00"
},
"renderpass": [
{
"inputs": [
{
"id": 257,
"src": "/media/previz/buffer00.png",
"ctype": "buffer",
"channel": 0,
"sampler": {
"filter": "linear",
"wrap": "repeat",
"vflip": "true",
"srgb": "false",
"internal": "byte"
},
"published": 1
}
],
"outputs": [
{
"id": 37,
"channel": 0
}
],
"code": "// Apache 2.0 no patents `.\u00b4\n\n// Image pass is mainly used for displaying the Buffer A \"background\"\n// Visualizer is purely done in Buffer A!\n// to change the music or try something different, change Channel1 in Buffer A!\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord/iResolution.xy;\n\n vec4 bg = texture(iChannel0, uv);\n \n fragColor = vec4(bg.rgb, 1.0);\n}",
"name": "Image",