Compare commits
3 Commits
wye-refact...woosuk/fla
| Author | SHA1 | Date |
|---|---|---|
| | 2ad6985c49 | |
| | da03cb8f0b | |
| | 90d43db442 | |
```diff
@@ -248,7 +248,9 @@ class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):
         self.block_table_arange = torch.arange(max_num_pages_per_req,
                                                dtype=torch.int32,
-                                               device=self.device)
+                                               device="cpu")
+
+        self.sliding_window = getattr(kv_cache_spec, "sliding_window", None)
 
     def _get_workspace_buffer(self):
         if self._workspace_buffer is None:
```
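
This hunk keeps `block_table_arange` on the CPU and records the `sliding_window` from the KV cache spec, which lets the page-validity mask in the second hunk be built entirely on the host. Below is a minimal standalone sketch of that CPU-side masking pattern; the sizes and `seq_lens_cpu` values are made up, and only the tensor names mirror the diff:

```python
import torch

# Hypothetical sizes; in the builder these come from the model/KV-cache config.
max_num_pages_per_req = 8
page_size = 16

# CPU arange, as in the hunk above (device="cpu").
block_table_arange = torch.arange(max_num_pages_per_req, dtype=torch.int32)

# Per-request page counts on CPU: ceil(seq_len / page_size).
seq_lens_cpu = torch.tensor([40, 100, 5], dtype=torch.int32)
block_table_bounds_cpu = (seq_lens_cpu + page_size - 1) // page_size  # [3, 7, 1]

# Broadcasted comparison builds a [num_reqs, max_num_pages] validity mask
# without touching the GPU.
mask = block_table_arange.unsqueeze(0) < block_table_bounds_cpu.unsqueeze(1)
print(mask.sum(dim=1))  # tensor([3, 7, 1]): valid pages per request
```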
```diff
@@ -487,16 +489,30 @@ class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):
             shared_kv_page_indices_cpu = None
             shared_kv_last_page_len_cpu = None
 
-        max_num_blocks = block_table_bounds_cpu.max()
-        block_table_bounds = block_table_bounds_cpu.to(self.device,
-                                                       non_blocking=True)
-        mask = (self.block_table_arange[:max_num_blocks].unsqueeze(0)
-                < block_table_bounds.unsqueeze(1))
+        max_num_blocks = block_table_bounds_cpu.max().item()
+        arange = self.block_table_arange[:max_num_blocks].unsqueeze(0)
+        mask = arange < block_table_bounds_cpu.unsqueeze(1)
+        if (self.sliding_window is not None and not use_cascade
+                and num_decodes > 0 and
+                max_num_blocks > self.sliding_window // page_size):
+            # NOTE(woosuk): Since FlashInfer's decode kernel doesn't skip the kv
+            # outside the sliding window and only do masking, we manually
+            # manipulate the seq_lens and block table for skipping.
+            # NOTE: Don't apply this optimization to prefill requests.
+            decode_seq_lens_cpu = seq_lens_cpu[:num_decodes]
+            num_skipped_pages = (
+                torch.relu(decode_seq_lens_cpu - self.sliding_window) //
+                page_size)
+
+            block_table_bounds_cpu[:num_decodes] -= num_skipped_pages
+            mask[:num_decodes] &= (arange[:num_decodes]
+                                   >= num_skipped_pages.unsqueeze(1))
+
         # write self.paged_kv_indices inplace
         num_actual_pages = torch.sum(mask)
         paged_kv_indices = self.paged_kv_indices[:num_actual_pages]
         torch.masked_select(block_table_tensor[:, :max_num_blocks],
-                            mask,
+                            mask.to(self.device, non_blocking=True),
                             out=paged_kv_indices)
 
         # write self.paged_kv_indptr_cpu inplace (0-index is always 0)
```
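
The sliding-window branch works because, once a decode request's sequence exceeds the window, its oldest full pages can never contribute to attention, so they can be dropped from the block table up front rather than only masked inside the kernel (per the NOTE in the diff). A rough standalone illustration of that arithmetic follows; the tensor names echo the diff, but the window, page size, and sequence lengths are invented for the example:

```python
import torch

page_size = 16
sliding_window = 64                       # tokens kept by sliding-window attention
seq_lens_cpu = torch.tensor([200, 50])    # two hypothetical decode requests

# Pages per request before any skipping: ceil(seq_len / page_size).
block_table_bounds_cpu = (seq_lens_cpu + page_size - 1) // page_size   # [13, 4]
max_num_blocks = int(block_table_bounds_cpu.max())                     # 13

arange = torch.arange(max_num_blocks).unsqueeze(0)
mask = arange < block_table_bounds_cpu.unsqueeze(1)   # every allocated page valid

# Pages whose tokens lie entirely outside the window can be skipped.
num_skipped_pages = torch.relu(seq_lens_cpu - sliding_window) // page_size  # [8, 0]

# Shrink the per-request page counts and drop the leading skipped pages,
# mirroring the two in-place updates in the hunk above.
block_table_bounds_cpu -= num_skipped_pages            # [5, 4]
mask &= arange >= num_skipped_pages.unsqueeze(1)

print(mask.sum(dim=1))   # tensor([5, 4]): pages actually gathered per request
```

Presumably the shrunken `block_table_bounds_cpu` then feeds the `paged_kv_indptr_cpu` update that follows the hunk, so the indptr/indices arrays only reference pages the decode kernel will actually read.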