|
| 1 | +#ifndef _FLEXFLOW_DECODING_H |
| 2 | +#define _FLEXFLOW_DECODING_H |
| 3 | + |
| 4 | +#include "flexflow/inference.h" |
| 5 | +#include "flexflow/model.h" |
| 6 | +#include "flexflow/layer.h" |
| 7 | +#include "flexflow/node.h" |
| 8 | +#include "flexflow/operator.h" |
| 9 | +#include "flexflow/ops/decoding_params.h" |
| 10 | +#include "flexflow/utils/memory_allocator.h" |
| 11 | +#include "flexflow/fftype.h" |
| 12 | +#include "flexflow/device.h" |
| 13 | + |
| 14 | +namespace FlexFlow { |
| 15 | + |
| 16 | +// forward declaration |
| 17 | +class DecodingMeta; |
| 18 | + |
/**
 * Decoding operator: declared here as an Op subclass with Legion task entry
 * points for init / inference / PEFT backward, plus CUDA-style kernel
 * wrappers (the implementations live elsewhere; only declarations are
 * visible in this header).
 *
 * Two inference task variants are declared — `inference_task_beam`
 * (returns BeamInferenceResult) and `inference_task_norm` (returns
 * InferenceResult) — selected, presumably, by the `beam_search` flag;
 * TODO confirm against the .cc/.cu implementation.
 */
class Decoding : public Op {
public:
  using Params = DecodingParams;
  using Input = ParallelTensor;
  // Direct constructor: builds the op from a single input tensor.
  Decoding(FFModel &model,
           LayerID const &_layer_guid,
           const ParallelTensor input,
           bool beam_search,
           char const *name);
  // Params-based constructor used by the graph/substitution machinery.
  Decoding(FFModel &model,
           Params const &params,
           const Input input,
           char const *name = nullptr);
  void init(FFModel const &) override;
  void init_inference(FFModel const &,
                      std::vector<ParallelTensor> const &,
                      std::vector<ParallelTensor> const &,
                      MachineView const *mv = nullptr) override;
  void forward(FFModel const &) override;
  // Launches the inference task(s); returns a FutureMap so callers can
  // defer waiting on per-shard results.
  Legion::FutureMap inference(FFModel const &,
                              BatchConfigFuture const &,
                              std::vector<ParallelTensor> const &,
                              std::vector<ParallelTensor> const &,
                              MachineView const *mv = nullptr) override;
  // PEFT (parameter-efficient fine-tuning) backward pass launcher.
  Legion::FutureMap peft_bwd(FFModel const &,
                             BatchConfigFuture const &,
                             std::vector<ParallelTensor> const &,
                             std::vector<ParallelTensor> const &,
                             MachineView const *mv = nullptr) override;
  void backward(FFModel const &) override;
  // Not supported for this op: aborts if ever called.
  void print_layer(FFModel const &model) override {
    assert(0);
  }
  // Factory used when lowering a Layer from the model graph into an Op.
  static Op *
      create_operator_from_layer(FFModel &model,
                                 Layer const *layer,
                                 std::vector<ParallelTensor> const &inputs);
  // --- Legion task bodies (static entry points registered with the runtime) ---
  static OpMeta *init_task(Legion::Task const *task,
                           std::vector<Legion::PhysicalRegion> const &regions,
                           Legion::Context ctx,
                           Legion::Runtime *runtime);
  // Beam-search variant of the inference task.
  static BeamInferenceResult
      inference_task_beam(Legion::Task const *task,
                          std::vector<Legion::PhysicalRegion> const &regions,
                          Legion::Context ctx,
                          Legion::Runtime *runtime);
  // Greedy/normal variant of the inference task.
  static InferenceResult
      inference_task_norm(Legion::Task const *task,
                          std::vector<Legion::PhysicalRegion> const &regions,
                          Legion::Context ctx,
                          Legion::Runtime *runtime);
  static bool peft_bwd_task(Legion::Task const *task,
                            std::vector<Legion::PhysicalRegion> const &regions,
                            Legion::Context ctx,
                            Legion::Runtime *runtime);
  bool measure_operator_cost(Simulator *sim,
                             MachineView const &pc,
                             CostMetrics &cost_metrics) const override;
  // Graph (de)serialization support for substitution/search.
  void serialize(Legion::Serializer &) const override;
  static PCG::Node deserialize(FFModel &ff,
                               Legion::Deserializer &d,
                               ParallelTensor inputs[],
                               int num_inputs);
  Op *materialize(FFModel &ff,
                  ParallelTensor inputs[],
                  int num_inputs) const override;
  Params get_params() const;

  // --- Device kernels and host-side wrappers ---
  // Raw-pointer kernel; `stream` is the device stream it is enqueued on.
  template <typename DT>
  static void inference_kernel(DecodingMeta const *m,
                               BatchConfig const *bc,
                               DT const *input_ptr,
                               DT *softmax_output_ptr,
                               int *argmax_output_ptr,
                               int num_classes,
                               float *loss,
                               ffStream_t stream);
  // Accessor-based wrapper called from the Legion task body.
  static void inference_kernel_wrapper(DecodingMeta *m,
                                       BatchConfig const *bc,
                                       bool is_last_op,
                                       GenericTensorAccessorR const &input,
                                       GenericTensorAccessorW const &softmax_output,
                                       GenericTensorAccessorW const &argmax_output);
  template <typename DT>
  static void peft_bwd_kernel(DecodingMeta const *m,
                              BatchConfig const *bc,
                              DT *input_grad_ptr,
                              int num_classes,
                              ffStream_t stream);
  static void peft_bwd_kernel_wrapper(DecodingMeta *m,
                                      BatchConfig const *bc,
                                      GenericTensorAccessorW const &input_grad);

public:
  LayerID layer_guid;
  // Selects the beam-search inference path over the greedy one.
  bool beam_search;
};
| 116 | + |
| 117 | +class DecodingMeta : public OpMeta { |
| 118 | +public: |
| 119 | + DecodingMeta(FFHandler handler, |
| 120 | + Decoding const *decoding, |
| 121 | + Legion::Domain const &input_domain, |
| 122 | + bool is_last_op, |
| 123 | + MemoryAllocator &gpu_mem_allocator); |
| 124 | + ~DecodingMeta(void); |
| 125 | + bool beam_search; |
| 126 | + float *probs; |
| 127 | + float *d_loss; |
| 128 | + // Temporary buffers |
| 129 | + int *parent_output_buffer; |
| 130 | + // PEFT related fields |
| 131 | + void *output_grad_ptr = nullptr; |
| 132 | + size_t allocated_peft_buffer_size = 0; |
| 133 | + Realm::RegionInstance reserveInst; |
| 134 | + BatchConfig::TokenId peft_token_ids[BatchConfig::MAX_NUM_TOKENS]; |
| 135 | +}; |
| 136 | + |
| 137 | +}; // namespace FlexFlow |
| 138 | + |
| 139 | +#endif // _FLEXFLOW_DECODING_H |
0 commit comments