Merged
17 changes: 7 additions & 10 deletions .github/workflow-template/template.yml
@@ -121,7 +121,7 @@ jobs:
- name: e2e test w/ Hummock # Note: RiseDev cluster cannot be used to run e2e coverage unless we rebuild frontend in this workflow
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/streaming/**/*.slt'

- name: Kill cluster
@@ -133,7 +133,7 @@ jobs:
- name: e2e test w/ Hummock w/o cache
run: |
~/cargo-make/makers clean-data
RW_NO_CACHE=1 ~/cargo-make/makers ci-streaming
RW_NO_CACHE=1 ~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/streaming/**/*.slt'

- name: Kill cluster
@@ -142,11 +142,10 @@ jobs:
~/cargo-make/makers logs
~/cargo-make/makers check-logs

# TODO: TABLE_V2 can't be distributed now
- name: e2e test batch 1-node (table_v2)
- name: e2e test batch 3-node (table_v2)
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-3node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/batch/**/*.slt'

- name: Kill cluster
@@ -155,7 +154,6 @@ jobs:
~/cargo-make/makers logs
~/cargo-make/makers check-logs

# TODO: TABLE_V2 can't be distributed now
- name: e2e test batch 3-node (table_v1)
run: |
~/cargo-make/makers clean-data
@@ -279,11 +277,10 @@ jobs:
- name: Prepare RiseDev playground
run: ~/cargo-make/makers pre-start-playground

# TODO: TABLE_V2 can't be distributed now
- name: Run 3-node regress tests (batch, table_v1)
- name: Run 3-node regress tests (batch)
run: |
~/cargo-make/makers clean-data
RW_FORCE_TABLE_V1=1 ~/cargo-make/makers ci-3node
~/cargo-make/makers ci-3node
psql --version
RUST_LOG=info cd rust && target/debug/risingwave_regress_test -h 127.0.0.1 \
-p ${{ env.RW_PORT }} \
@@ -300,7 +297,7 @@ jobs:
- name: e2e test w/ source and Hummock
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p ${{ env.RW_PORT }} -d dev './e2e_test/source/**/*.slt'

- name: Kill cluster
14 changes: 7 additions & 7 deletions .github/workflows/main.yml
@@ -109,7 +109,7 @@ jobs:
- name: e2e test w/ Hummock
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/streaming/**/*.slt'
- name: Kill cluster
run: |
@@ -119,17 +119,17 @@ jobs:
- name: e2e test w/ Hummock w/o cache
run: |
~/cargo-make/makers clean-data
RW_NO_CACHE=1 ~/cargo-make/makers ci-streaming
RW_NO_CACHE=1 ~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/streaming/**/*.slt'
- name: Kill cluster
run: |
~/cargo-make/makers k
~/cargo-make/makers logs
~/cargo-make/makers check-logs
- name: e2e test batch 1-node (table_v2)
- name: e2e test batch 3-node (table_v2)
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-3node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/batch/**/*.slt'
- name: Kill cluster
run: |
@@ -239,10 +239,10 @@ jobs:
cp risedev-components.ci.env risedev-components.user.env
- name: Prepare RiseDev playground
run: ~/cargo-make/makers pre-start-playground
- name: Run 3-node regress tests (batch, table_v1)
- name: Run 3-node regress tests (batch)
run: |
~/cargo-make/makers clean-data
RW_FORCE_TABLE_V1=1 ~/cargo-make/makers ci-3node
~/cargo-make/makers ci-3node
psql --version
RUST_LOG=info cd rust && target/debug/risingwave_regress_test -h 127.0.0.1 \
-p ${{ env.RW_PORT }} \
@@ -257,7 +257,7 @@ jobs:
- name: e2e test w/ source and Hummock
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p ${{ env.RW_PORT }} -d dev './e2e_test/source/**/*.slt'
- name: Kill cluster
run: |
14 changes: 7 additions & 7 deletions .github/workflows/pull-request.yml
@@ -108,7 +108,7 @@ jobs:
- name: e2e test w/ Hummock
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/streaming/**/*.slt'
- name: Kill cluster
run: |
@@ -118,17 +118,17 @@ jobs:
- name: e2e test w/ Hummock w/o cache
run: |
~/cargo-make/makers clean-data
RW_NO_CACHE=1 ~/cargo-make/makers ci-streaming
RW_NO_CACHE=1 ~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/streaming/**/*.slt'
- name: Kill cluster
run: |
~/cargo-make/makers k
~/cargo-make/makers logs
~/cargo-make/makers check-logs
- name: e2e test batch 1-node (table_v2)
- name: e2e test batch 3-node (table_v2)
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-3node
timeout 10m sqllogictest -p 4567 -d dev './e2e_test/batch/**/*.slt'
- name: Kill cluster
run: |
@@ -238,10 +238,10 @@ jobs:
cp risedev-components.ci.env risedev-components.user.env
- name: Prepare RiseDev playground
run: ~/cargo-make/makers pre-start-playground
- name: Run 3-node regress tests (batch, table_v1)
- name: Run 3-node regress tests (batch)
run: |
~/cargo-make/makers clean-data
RW_FORCE_TABLE_V1=1 ~/cargo-make/makers ci-3node
~/cargo-make/makers ci-3node
psql --version
RUST_LOG=info cd rust && target/debug/risingwave_regress_test -h 127.0.0.1 \
-p ${{ env.RW_PORT }} \
@@ -256,7 +256,7 @@ jobs:
- name: e2e test w/ source and Hummock
run: |
~/cargo-make/makers clean-data
~/cargo-make/makers ci-streaming
~/cargo-make/makers ci-1node
timeout 10m sqllogictest -p ${{ env.RW_PORT }} -d dev './e2e_test/source/**/*.slt'
- name: Kill cluster
run: |
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -42,7 +42,7 @@ sqllogictest -p 4567 -d dev './e2e_test/**/*.slt'
To run end-to-end tests with state store, run the script:

```shell
./risedev ci-streaming
./risedev ci-1node
sqllogictest -p 4567 -d dev './e2e_test/**/*.slt'
```

9 changes: 0 additions & 9 deletions Makefile.toml
@@ -176,7 +176,6 @@ command = "rust/target/${BUILD_MODE_DIR}/risedev-playground"
args = ["ci-3node"]
description = "Start a RisingWave playground with ci-3node config"


[tasks.ci-1node]
category = "RiseDev - Start"
dependencies = [
@@ -186,14 +185,6 @@ command = "rust/target/${BUILD_MODE_DIR}/risedev-playground"
args = ["ci-1node"]
description = "Start a RisingWave playground ci-1node config"

[tasks.ci-streaming]
category = "RiseDev - Start"
dependencies = [
"pre-start-playground"
]
command = "rust/target/${BUILD_MODE_DIR}/risedev-playground"
args = ["ci-streaming"]
description = "Start a RisingWave playground ci-streaming config"

[tasks.dev-compute-node]
category = "RiseDev - Start"
2 changes: 1 addition & 1 deletion README.md
@@ -52,7 +52,7 @@ Then, simply run:

```
./risedev d # shortcut for ./risedev dev
./risedev ci-streaming
./risedev ci-1node
./risedev ci-3node
./risedev dev-compute-node # compute node will need to be started by you
```
12 changes: 4 additions & 8 deletions risedev.yml
@@ -1,6 +1,7 @@
risedev:
# The CI-3node configuration will start 3 compute nodes, 1 meta node, 1 frontend.
# The CI-3node configuration will start 3 compute nodes, 1 meta node, 1 frontend and 1 MinIO.
ci-3node:
- use: minio
- use: meta-node
- use: compute-node
port: 5687
@@ -13,14 +14,8 @@ risedev:
exporter-port: 1224
- use: frontend

Contributor (review comment): minio should be started at first.

# The CI-1node configuration will start 1 compute node, 1 meta node, 1 frontend.
# The CI-1node configuration will start 1 compute node, 1 meta node, 1 frontend and 1 MinIO.
ci-1node:
- use: meta-node
- use: compute-node
- use: frontend

# The CI-streaming configuration will start 1 compute node, 1 meta node, 1 frontend and 1 MinIO.
ci-streaming:
- use: minio
- use: meta-node
- use: compute-node
@@ -50,6 +45,7 @@ risedev:

# `dev-frontend` have the same settings as default except the the frontend node will be started by user.
dev-frontend:
- use: minio
- use: meta-node
- use: compute-node
- use: frontend
18 changes: 15 additions & 3 deletions rust/batch/src/executor/insert.rs
@@ -19,6 +19,7 @@ pub struct InsertExecutor {
/// target table id
table_id: TableId,
source_manager: SourceManagerRef,
worker_id: u32,

child: BoxedExecutor,
executed: bool,
@@ -27,10 +28,16 @@ pub struct InsertExecutor {
}

impl InsertExecutor {
pub fn new(table_id: TableId, source_manager: SourceManagerRef, child: BoxedExecutor) -> Self {
pub fn new(
table_id: TableId,
source_manager: SourceManagerRef,
child: BoxedExecutor,
worker_id: u32,
) -> Self {
Self {
table_id,
source_manager,
worker_id,
child,
executed: false,
schema: Schema {
@@ -92,7 +99,7 @@ impl Executor for InsertExecutor {

let next_row_id = || match source.source.as_ref() {
SourceImpl::Table(t) => t.next_row_id(),
SourceImpl::TableV2(t) => t.next_row_id(),
SourceImpl::TableV2(t) => t.next_row_id(self.worker_id),
_ => unreachable!(),
};

@@ -104,7 +111,7 @@ impl Executor for InsertExecutor {
// add row-id column as first column
let mut builder = I64ArrayBuilder::new(len).unwrap();
for _ in 0..len {
builder.append(Some(next_row_id() as i64)).unwrap();
builder.append(Some(next_row_id())).unwrap();
}

let rowid_column = once(Column::new(Arc::new(ArrayImpl::from(
@@ -180,6 +187,7 @@ impl BoxedExecutorBuilder for InsertExecutor {
table_id,
source.global_task_env().source_manager_ref(),
child,
source.global_task_env().worker_id(),
)))
}
}
@@ -258,6 +266,7 @@ mod tests {
let mut insert_executor = InsertExecutor {
table_id: table_id.clone(),
source_manager: source_manager.clone(),
worker_id: 0,
child: Box::new(mock_executor),
executed: false,
schema: Schema {
@@ -332,6 +341,7 @@ mod tests {
let mut insert_executor = InsertExecutor {
table_id: table_id.clone(),
source_manager: source_manager.clone(),
worker_id: 0,
child: Box::new(mock_executor),
executed: false,
schema: Schema {
@@ -386,6 +396,7 @@ mod tests {
let mut insert_executor = InsertExecutor {
table_id: table_id2.clone(),
source_manager: source_manager.clone(),
worker_id: 0,
child: Box::new(mock_executor),
executed: false,
schema: Schema {
@@ -519,6 +530,7 @@ mod tests {
table_id.clone(),
source_manager.clone(),
Box::new(mock_executor),
0,
);
insert_executor.open().await.unwrap();
let fields = &insert_executor.schema().fields;
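A note on the `worker_id` plumbing above: `InsertExecutor` now receives the compute node's worker id from `global_task_env()` and passes it to `next_row_id` for `TableV2` sources, which suggests row ids are generated per worker so that parallel compute nodes do not collide. The sketch below shows one plausible scheme (packing the worker id into the high bits of the returned `i64`); it mirrors only the `next_row_id(worker_id)` shape from the diff, and the actual RisingWave encoding is an assumption here.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Hypothetical worker-scoped row-id generator, illustrating why `worker_id`
/// is threaded into `next_row_id`: the worker id occupies the upper 32 bits
/// and a local counter the lower 32, so two compute nodes can hand out ids
/// concurrently without coordination or collisions. Not the real
/// `TableSourceV2` implementation.
struct RowIdGenerator {
    next_seq: AtomicU32,
}

impl RowIdGenerator {
    fn new() -> Self {
        Self {
            next_seq: AtomicU32::new(0),
        }
    }

    /// Mirrors the `next_row_id(self.worker_id)` call site in the diff.
    fn next_row_id(&self, worker_id: u32) -> i64 {
        let seq = self.next_seq.fetch_add(1, Ordering::Relaxed) as i64;
        ((worker_id as i64) << 32) | seq
    }
}

fn main() {
    let generator = RowIdGenerator::new();
    // Workers 0 and 1 draw from disjoint id ranges.
    assert_eq!(generator.next_row_id(0), 0);
    assert_eq!(generator.next_row_id(1), (1i64 << 32) | 1);
    println!("row ids are disjoint per worker");
}
```

A scheme like this would also be consistent with the diff dropping the `as i64` cast: such a generator already returns a 64-bit id.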
25 changes: 25 additions & 0 deletions rust/batch/src/executor/row_seq_scan.rs
@@ -15,6 +15,8 @@ pub struct RowSeqScanExecutor {
table: Arc<dyn ScannableTable>,
/// An iterator to scan StateStore.
iter: Option<TableIterRef>,
primary: bool,

column_ids: Vec<i32>,
column_indices: Vec<usize>,
chunk_size: usize,
@@ -30,6 +32,7 @@ impl RowSeqScanExecutor {
table: Arc<dyn ScannableTable>,
column_ids: Vec<i32>,
chunk_size: usize,
primary: bool,
identity: String,
) -> Self {
// Currently row_id for table_v2 is totally a mess, we override this function to match the
@@ -49,13 +52,25 @@ impl RowSeqScanExecutor {
Self {
table,
iter: None,
primary,
column_ids,
column_indices,
chunk_size,
schema,
identity,
}
}

// TODO: Remove this when we support real partition-scan.
// For shared storage like Hummock, we are using a fake partition-scan now. If `self.primary` is
// false, we'll ignore this scanning and yield no chunk.
fn should_ignore(&self) -> bool {
if self.table.is_shared_storage() {
!self.primary
} else {
false
}
}
}

impl BoxedExecutorBuilder for RowSeqScanExecutor {
@@ -78,6 +93,7 @@ impl BoxedExecutorBuilder for RowSeqScanExecutor {
table,
column_ids,
Self::DEFAULT_CHUNK_SIZE,
source.task_id.task_id == 0,
source.plan_node().get_identity().clone(),
)))
}
@@ -86,11 +102,20 @@ impl Executor for RowSeqScanExecutor {
#[async_trait::async_trait]
impl Executor for RowSeqScanExecutor {
async fn open(&mut self) -> Result<()> {
if self.should_ignore() {
info!("non-primary row seq scan, ignored");
return Ok(());
}

self.iter = Some(self.table.iter(u64::MAX).await?);
Ok(())
}

async fn next(&mut self) -> Result<Option<DataChunk>> {
if self.should_ignore() {
return Ok(None);
}

let iter = self.iter.as_mut().expect("executor not open");

self.table
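To make the new `primary` flag concrete: with a shared state store such as Hummock, every parallel scan task would otherwise read the entire table, so the builder marks only the task with `task_id == 0` as primary and `should_ignore` makes every other task yield nothing. The standalone model below reproduces that gating logic; the names and types are hypothetical and do not match the real executors.

```rust
/// Minimal model of the "fake partition-scan" gating: for a table backed by
/// shared storage, only the task with id 0 scans; every other parallel task
/// returns no data. Illustrative only.
struct ScanTask {
    task_id: u32,
    shared_storage: bool,
}

impl ScanTask {
    /// Corresponds to `source.task_id.task_id == 0` in the builder above.
    fn primary(&self) -> bool {
        self.task_id == 0
    }

    /// Corresponds to `RowSeqScanExecutor::should_ignore`.
    fn should_ignore(&self) -> bool {
        self.shared_storage && !self.primary()
    }

    /// Corresponds to `next`: non-primary shared-storage tasks yield nothing.
    fn next(&self, table: &[i64]) -> Option<Vec<i64>> {
        if self.should_ignore() {
            return None;
        }
        Some(table.to_vec())
    }
}

fn main() {
    let table = vec![1, 2, 3];
    let primary = ScanTask { task_id: 0, shared_storage: true };
    let secondary = ScanTask { task_id: 1, shared_storage: true };
    // Only the primary task reads the shared table, so rows are not
    // duplicated across parallel tasks.
    assert_eq!(primary.next(&table), Some(table.clone()));
    assert_eq!(secondary.next(&table), None);
}
```

Until real partition-scan support lands (the TODO in the diff), this trades parallelism for correctness: a shared-storage table is effectively scanned by a single task per query.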