diff --git a/arch/src/loongarch64/page_table.rs b/arch/src/loongarch64/page_table.rs
index 34d04a073b6d3f83eac660e5bd7b565585d14596..bcc560eae0b864f406099ba73b6ab55295bef1fb 100644
--- a/arch/src/loongarch64/page_table.rs
+++ b/arch/src/loongarch64/page_table.rs
@@ -58,6 +58,9 @@ impl From<MappingFlags> for PTEFlags {
         if value.contains(MappingFlags::U) {
             flags |= PTEFlags::PLV_USER;
         }
+        if value.contains(MappingFlags::cow){
+            flags |= PTEFlags::cow;
+        }
         flags
     }
 }
@@ -80,6 +83,9 @@ impl Into<MappingFlags> for PTEFlags {
         if self.contains(PTEFlags::PLV_USER) {
             flags |= MappingFlags::U;
         }
+        if self.contains(PTEFlags::cow) {
+            flags |= MappingFlags::cow;
+        }
         flags
     }
 }
@@ -111,6 +117,8 @@ bitflags::bitflags! {
         /// FIXME: Is it just for a huge page?
         /// Linux related url: https://github.com/torvalds/linux/blob/master/arch/loongarch/include/asm/pgtable-bits.h
         const NX = bit!(12);
+        /// Copy-on-Write software flag (uses software-available PTE bit 58).
+        const cow = bit!(58);
         /// Whether the privilege Level is restricted. When RPLV is 0, the PTE
         /// can be accessed by any program with privilege Level highter than PLV.
         const RPLV = bit!(63);
diff --git a/arch/src/pagetable.rs b/arch/src/pagetable.rs
index cf34251263c432115c5e065eabf05dd855d404d6..e150328ffdd173dbe9a39d6577dbbea82fa0ee36 100644
--- a/arch/src/pagetable.rs
+++ b/arch/src/pagetable.rs
@@ -27,6 +27,8 @@ bitflags::bitflags! {
         const Device = bit!(8);
         /// Cache Flag, indicating that the page will be cached
         const Cache = bit!(9);
+        /// Copy-on-Write flag: page is shared read-only and must be copied on the first write fault
+        const cow = bit!(10);
 
         /// Read | Write | Executeable Flags
         const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();
@@ -117,7 +119,41 @@ impl PageTable {
         pte_list[vpn.pn_index(0)] = PTE::new_page(ppn, flags.into());
         TLB::flush_vaddr(vpn.into());
     }
-
+    /// Returns the mapping flags of the leaf PTE for `vpn`. NOTE(review): despite the `get_` name, this allocates intermediate page-table frames for missing levels — confirm this side effect is intended.
+    pub fn get_pte_flags(&self, vpn: VirtPage) -> MappingFlags {
+        assert!(
+            vpn.to_addr() <= Self::USER_VADDR_END,
+            "You only should use the address limited by user {}",vpn.to_addr()
+        );
+        assert!(Self::PAGE_LEVEL >= 3, "Just level >= 3 supported currently");
+        let mut pte_list = Self::get_pte_list(self.0);
+        if Self::PAGE_LEVEL == 4 {
+            let pte = &mut pte_list[vpn.pn_index(3)];
+            if !pte.is_valid() {
+                *pte = PTE::new_table(ArchInterface::frame_alloc_persist());
+            }
+            pte_list = Self::get_pte_list(pte.address());
+        }
+        // level 3
+        {
+            let pte = &mut pte_list[vpn.pn_index(2)];
+            if !pte.is_valid() {
+                *pte = PTE::new_table(ArchInterface::frame_alloc_persist());
+            }
+            pte_list = Self::get_pte_list(pte.address());
+        }
+        // level 2
+        {
+            let pte = &mut pte_list[vpn.pn_index(1)];
+            if !pte.is_valid() {
+                *pte = PTE::new_table(ArchInterface::frame_alloc_persist());
+            }
+            pte_list = Self::get_pte_list(pte.address());
+        }
+        // level 1, map page
+        pte_list[vpn.pn_index(0)].flags().into()
+        
+    }
     /// Mapping a page to specific address(kernel space address).
     ///
     /// TODO: This method is not implemented.
diff --git a/arch/src/riscv64/page_table/sv39.rs b/arch/src/riscv64/page_table/sv39.rs
index 49e7ee6eb8625a4512f2e6610a52d2e45e35a42a..4d5b47db9e93bd427cdc45284aa0e79c01cc4c21 100644
--- a/arch/src/riscv64/page_table/sv39.rs
+++ b/arch/src/riscv64/page_table/sv39.rs
@@ -44,7 +44,7 @@ impl PTE {
 
     #[inline]
     pub const fn flags(&self) -> PTEFlags {
-        PTEFlags::from_bits_truncate((self.0 & 0xff) as u64)
+        PTEFlags::from_bits_truncate((self.0 & 0x1ff) as u64)
     }
 
     #[inline]
@@ -99,6 +99,7 @@ bitflags! {
         const G = bit!(5);
         const A = bit!(6);
         const D = bit!(7);
+        const cow = bit!(8);
 
         #[cfg(c906)]
         const SO = bit!(63);
@@ -142,6 +143,9 @@ impl From<MappingFlags> for PTEFlags {
             if flags.contains(MappingFlags::D) {
                 res |= PTEFlags::D;
             }
+            if flags.contains(MappingFlags::cow) {
+                res |= PTEFlags::cow;
+            }
             res
         }
     }
@@ -171,7 +175,9 @@ impl From<PTEFlags> for MappingFlags {
         if value.contains(PTEFlags::D) {
             mapping_flags |= MappingFlags::D;
         }
-
+        if value.contains(PTEFlags::cow) {
+            mapping_flags |= MappingFlags::cow;
+        }
         mapping_flags
     }
 }
diff --git a/ext4-fs-fuse/src/main.rs b/ext4-fs-fuse/src/main.rs
index 8389455c305111ceff0d470c40b3dada99c180c7..c0f3bd7d855d345a67c2f8c647bbdcf79d51e331 100644
--- a/ext4-fs-fuse/src/main.rs
+++ b/ext4-fs-fuse/src/main.rs
@@ -116,10 +116,10 @@ fn easy_fs_pack() -> std::io::Result<()> {
         sb.ext4fs.ext4_file_write(inode as u64, 0, all_data.as_slice());
     }
     println!("app:----");
-    // list apps
+    /* list apps
      for app in root_dentry.clone().ls() {
          println!("{}", app);
-     }
+     }*/
     drop(root_dentry);
     block_cache_sync_all();
     Ok(())
diff --git a/os/src/main.rs b/os/src/main.rs
index 73e8dcd7f920113c59a1ff37f87dbf7b80737d2a..0eb49849bca91adbb7c8ee060c95803d731c13e9 100644
--- a/os/src/main.rs
+++ b/os/src/main.rs
@@ -70,7 +70,6 @@ use lazy_static::*;
 //use sync::IntrCell;
 use arch::TrapType::*;
 use log::Record;
-use syscall::lazy_brk;
 //lazy_static! {
     //
   //  pub static ref DEV_NON_BLOCKING_ACCESS: IntrCell<bool> =
@@ -140,20 +139,30 @@ impl ArchInterface for ArchInterfaceImpl {
                 println!("\nheaptop={} data={}",inner.heap_top,inner.max_data_addr);
                 drop(inner);
                 */
-                match lazy_brk(_paddr) {
-                    Ok(0) => {
-                        /*println!("lazy-brk: {}",_paddr);
-                        let task=current_task().unwrap();
-                        let inner=task.inner_exclusive_access();
-                        println!("heaptop={} data={}",inner.heap_top,inner.max_data_addr);
-                        */
-                    }
-                    _ => {
-                        println!("errpage = {:x}",_paddr/PAGE_SIZE);
-                        println!("err {:x?},sepc:{:x}", trap_type,ctx.sepc);
-                    //      ctx.syscall_ok();
-                        exit_current_and_run_next(-1);
+                let ctask = current_task().unwrap();
+                let inner = ctask.inner_exclusive_access();
+                if inner.memory_set.lock().handle_lazy_addr(_paddr, trap_type).is_err() {
+                    match trap_type {
+                        StorePageFault(_paddr)=>{
+                            let mut memory_set = inner.memory_set.lock();
+                            let r = memory_set.handle_cow_addr(_paddr);
+                            if r.is_err(){
+                                memory_set.debug_addr_info();                                
+                                println!("err {:x?},sepc:{:x},sepcpage:{:x}", trap_type,ctx.sepc,ctx.sepc/PAGE_SIZE);
+                                //      ctx.syscall_ok();
+                                drop(memory_set);
+                                drop(inner);
+                                exit_current_and_run_next(-1);
+                            }
+                        }
+                        _ =>{
+                            println!("err {:x?},sepc:{:x},sepcpage:{:x}", trap_type,ctx.sepc,ctx.sepc/PAGE_SIZE);
+                            //      ctx.syscall_ok();
+                            drop(inner);
+                            exit_current_and_run_next(-1);
+                        }
                     }
+
                 }
                 //println!("err {:x?},sepc:{:x}", trap_type,ctx.sepc);
           //      ctx.syscall_ok();
diff --git a/os/src/mm/memory_set.rs b/os/src/mm/memory_set.rs
index 71fe8d39202b1d4c3a7aabb98452adb325987a92..5510a66b64840b38323c1fd84d643461241dbe1a 100644
--- a/os/src/mm/memory_set.rs
+++ b/os/src/mm/memory_set.rs
@@ -9,7 +9,7 @@ use alloc::alloc::dealloc;
 use alloc::string::String;
 use arch::pagetable::{MappingFlags, MappingSize, PageTable, PageTableWrapper};
 use arch::addr::{PhysAddr, PhysPage, VirtAddr, VirtPage};
-use arch::{USER_VADDR_END,PAGE_SIZE};
+use arch::{TrapType, PAGE_SIZE, USER_VADDR_END};
 use config::{USER_HEAP_SIZE, USER_MMAP_TOP, USER_STACK_SIZE, USER_STACK_TOP};
 use crate::sync::UPSafeCell;
 use alloc::collections::BTreeMap;
@@ -32,6 +32,7 @@ pub fn kernel_token() -> usize {
     KERNEL_SPACE.lock().token()
 } */
 /// memory set structure, controls virtual-memory space
+#[derive(Clone)]
 pub struct MemorySet {
     ///
     pub page_table: Arc<PageTableWrapper>,
@@ -86,11 +87,72 @@ impl MemorySet {
     pub fn push_into_heaparea_lazy(&mut self, map_area: MapArea) { 
         self.heap_area.push(map_area);
     }
-    pub fn handle_lazy_addr(&mut self,addr:usize)->SysResult<()>{
+    pub fn handle_lazy_addr(&mut self,addr:usize,_type:TrapType)->SysResult<isize>{
+        if let Some((ppn,_mp)) = self.translate(VirtPage::new(addr/PAGE_SIZE)){
+            if ppn.to_addr() != 0{
+                return Err(SysError::EADDRINUSE);
+            }
+        }
         for area in self.heap_area.iter_mut(){
             if area.vpn_range.get_start().to_addr() <= addr && area.vpn_range.get_end().to_addr() >= addr{
                 area.map_one(&self.page_table, VirtPage::new(addr/PAGE_SIZE));
-                return Ok(());
+                return Ok(0);
+            }
+        }
+        return Err(SysError::EADDRNOTAVAIL);
+    }
+    pub fn handle_cow_addr(&mut self,addr:usize)->SysResult<isize>{
+        for area in self.areas.iter_mut(){
+            if area.vpn_range.get_start().to_addr() <= addr && area.vpn_range.get_end().to_addr() >= addr{
+                if let Some((_ppn,mut mp)) = self.page_table.translate(VirtAddr::from(addr)){
+                    if mp.contains(MappingFlags::cow){
+                        let vpn = VirtPage::new(addr/PAGE_SIZE);
+                        let frame = area.data_frames.get(&vpn).unwrap();
+                        if Arc::strong_count(frame) == 1{
+                            mp |= MappingFlags::W;
+                            mp &= !MappingFlags::cow;
+                            self.page_table.map_page(vpn, frame.ppn, mp.into(), MappingSize::Page4KB);
+                            return Ok(0);
+                        }
+                        let src_ppn = area.data_frames.get(&vpn).unwrap().ppn;
+                        area.unmap_one(&self.page_table, vpn);
+                        area.map_one(&self.page_table, vpn);
+                        let dst_ppn = area.data_frames.get(&vpn).unwrap().ppn;
+                        dst_ppn.get_buffer().copy_from_slice(src_ppn.get_buffer());
+                        mp |= MappingFlags::W;
+                        mp &= !MappingFlags::cow;
+                        self.page_table.map_page(vpn, dst_ppn, mp.into(), MappingSize::Page4KB);
+                        return Ok(0);
+                    }
+                }
+            }
+        }
+        for area in self.heap_area.iter_mut(){
+            if area.vpn_range.get_start().to_addr() <= addr && area.vpn_range.get_end().to_addr() >= addr{
+                if let Some((_ppn,mut mp)) = self.page_table.translate(VirtAddr::from(addr)){
+                    if mp.contains(MappingFlags::cow){
+                        let vpn = VirtPage::new(addr/PAGE_SIZE);
+                        let frame = area.data_frames.get(&vpn).unwrap();
+                        if Arc::strong_count(frame) == 1{
+                            mp |= MappingFlags::W;
+                            mp &= !MappingFlags::cow;
+                            self.page_table.map_page(vpn, frame.ppn, mp.into(), MappingSize::Page4KB);
+                            return Ok(0);
+                        }
+                        let src_ppn = area.data_frames.get(&vpn).unwrap().ppn;
+                        area.unmap_one(&self.page_table, vpn);
+                        area.map_one(&self.page_table, vpn);
+                        let dst_ppn = area.data_frames.get(&vpn).unwrap().ppn;
+                        dst_ppn.get_buffer().copy_from_slice(src_ppn.get_buffer());
+                        mp |= MappingFlags::W;
+                        mp &= !MappingFlags::cow;
+                        self.page_table.map_page(vpn, dst_ppn, mp.into(), MappingSize::Page4KB);
+                        return Ok(0);
+                    }
+                    else{
+                        println!("doesn't has cow");
+                    }
+                }
             }
         }
         return Err(SysError::EADDRNOTAVAIL);
@@ -197,14 +259,50 @@ impl MemorySet {
             header_va + elf_header.pt2.ph_offset() as usize
         )
     }
+    /// Prints the memory set (areas, heap, mmap): VPN ranges, frames, PTE flag bits, and Arc refcounts, for debugging.
+    pub fn show(&self) {
+        println!("\nareas");
+        for area in &self.areas {
+            println!("range {}-{}",area.vpn_range.get_start(),area.vpn_range.get_end());
+            for (vpn,frame) in &area.data_frames {
+                println!("{:x} {} {} arc:{}",vpn.value(),frame.ppn,self.page_table.get_pte_flags(*vpn).bits(),Arc::strong_count(&frame));
+            }
+        }
+
+        println!("\nheap");
+        for area in &self.heap_area {
+            println!("range {}-{}",area.vpn_range.get_start(),area.vpn_range.get_end());
+            for (vpn,frame) in &area.data_frames {
+                println!("{:x} {} {} arc:{}",vpn.value(),frame.ppn,self.page_table.get_pte_flags(*vpn).bits(),Arc::strong_count(&frame));
+            }
+        }
+
+        println!("\nmmap");
+        for area in &self.mmap_area {
+            println!("range {}-{}",area.vpn_range.get_start(),area.vpn_range.get_end());
+            for (vpn,frame) in &area.data_frames {
+                println!("{:x} {} {} arc:{}",vpn.value(),frame.ppn,self.page_table.get_pte_flags(*vpn).bits(),Arc::strong_count(&frame));
+            }
+        }
+    }
     ///Clone a same `MemorySet`
     pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
         let mut memory_set = Self::new_bare();
         // map trampoline
         // copy data sections/trap_context/user_stack
+        let pagetable = memory_set.page_table.clone();
         for area in user_space.areas.iter() {
-            let new_area = MapArea::from_another(area);
-            memory_set.push(new_area, None);
+            let mut new_area = MapArea::from_another(area);
+            new_area.data_frames = area.data_frames.clone();
+            for (vpn,frame) in new_area.data_frames.iter(){
+                let mut pte = user_space.page_table.get_pte_flags(*vpn);
+                if pte.contains(MappingFlags::W) || pte.contains(MappingFlags::cow){
+                    pte |= MappingFlags::cow;
+                    pte &= !MappingFlags::W;
+                }
+                pagetable.map_page(*vpn, frame.ppn, pte.into(), MappingSize::Page4KB);
+            }
+            /* 
             // copy data from another space
             for vpn in area.vpn_range {
                 let src_ppn = user_space.translate(vpn).unwrap().0;
@@ -213,29 +311,27 @@ impl MemorySet {
                 //    .get_bytes_array()
                   //  .copy_from_slice(src_ppn.get_bytes_array());
                   dst_ppn.get_buffer().copy_from_slice(src_ppn.get_buffer())
-            }
+            }*/
+            memory_set.areas.push(new_area);
         }
         // copy heap_area (可能出错)
         for area in user_space.heap_area.iter() {
-            let new_area = MapArea::from_another(area);
-            memory_set.push_into_heaparea_lazy_while_clone(new_area);
-       //     println!("fork from user push heap");
-            // copy data from another space
+            let mut new_area = MapArea::from_another(area);
+            new_area.data_frames = area.data_frames.clone();
             for vpn in area.vpn_range {
-                if let Some(src_ppn) = user_space.translate(vpn){
-                    if src_ppn.0.to_addr() != 0 {
-                  //      println!("vpn:{:x} ppn:{:x}",vpn.to_addr(),src_ppn.0.to_addr());
-                        let dst_ppn = memory_set.translate(vpn).unwrap();
-                        dst_ppn.0.get_buffer().copy_from_slice(src_ppn.0.get_buffer())
+                //self.map_one(page_table, vpn);
+                if let Some((ppn,_mp)) = user_space.translate(vpn.into()){
+                    if ppn.to_addr() != 0 {
+                        let mut pte = user_space.page_table.get_pte_flags(vpn);
+                        if pte.contains(MappingFlags::W) || pte.contains(MappingFlags::cow){
+                            pte |= MappingFlags::cow;
+                            pte &= !MappingFlags::W;
+                        }
+                        pagetable.map_page(vpn, ppn, pte.into(), MappingSize::Page4KB);
                     }
-                    
-
-                }
-                
-              //  dst_ppn
-                //    .get_bytes_array()
-                  //  .copy_from_slice(src_ppn.get_bytes_array());
+                }  
             }
+            memory_set.heap_area.push(new_area);
         }
         //copy mmap_area (可能出错)
         for area in user_space.mmap_area.iter() {
@@ -286,6 +382,9 @@ impl MemorySet {
         for ele in &self.areas {
             println!("{:x} {:x} {:x} {:x}",ele.vpn_range.get_start_addr().addr(),ele.vpn_range.get_end_addr().addr(),ele.vpn_range.get_start().to_addr(),ele.vpn_range.get_end().to_addr());
         }
+        for ele in &self.heap_area {
+            println!("{:x} {:x} {:x} {:x}",ele.vpn_range.get_start_addr().addr(),ele.vpn_range.get_end_addr().addr(),ele.vpn_range.get_start().to_addr(),ele.vpn_range.get_end().to_addr());
+        }
     }
 }
 /*
@@ -344,12 +443,12 @@ impl MemorySet {
 }*/
 
 /// map area structure, controls a contiguous piece of virtual memory
-
+#[derive(Clone)]
 pub struct MapArea {
     ///
     pub vpn_range: VPNRange,
     ///
-    pub data_frames: BTreeMap<VirtPage, FrameTracker>,
+    pub data_frames: BTreeMap<VirtPage, Arc<FrameTracker>>,
     ///
     pub map_type: MapType,
     ///
@@ -388,7 +487,7 @@ impl MapArea {
     pub fn map_one(&mut self, page_table: &Arc<PageTableWrapper>, vpn: VirtPage) {
         let frame = frame_alloc().unwrap();
         let ppn: PhysPage = frame.ppn;
-        self.data_frames.insert(vpn, frame);
+        self.data_frames.insert(vpn, frame.into());
         /*match self.map_type {
             MapType::Identical => {
                 ppn = PhysPageNum(vpn.0);
@@ -402,13 +501,18 @@ impl MapArea {
         //let pte_flags = PTEFlags::from_bits(self.map_perm.bits).unwrap();
         page_table.map_page(vpn, ppn, self.map_perm.into(),MappingSize::Page4KB);
     }
-    /*
-    pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPage, ppn: PhysPage) {
-        if self.map_type == MapType::Framed {
-            self.data_frames.remove(&ppn);
-        }
+    pub fn map_one_with_flags(&mut self, page_table: &Arc<PageTableWrapper>, vpn: VirtPage, flags: MappingFlags) {
+        let frame = frame_alloc().unwrap();
+        let ppn: PhysPage = frame.ppn;
+        self.data_frames.insert(vpn, frame.into());
+        
+        page_table.map_page(vpn, ppn, flags,MappingSize::Page4KB);
+    }
+    
+    pub fn unmap_one(&mut self, page_table: &Arc<PageTableWrapper>, vpn: VirtPage) {
+        self.data_frames.remove(&vpn);
         page_table.unmap_page(vpn);
-    }*/
+    }
     ///
     pub fn map(&mut self, page_table: &Arc<PageTableWrapper>) {
         for vpn in self.vpn_range {
@@ -416,7 +520,7 @@ impl MapArea {
             let p_tracker = frame_alloc().expect("can't allocate frame");
             //println!("vpn={},ppn={}",vpn,p_tracker.ppn);
             page_table.map_page(vpn, p_tracker.ppn, self.map_perm.into(), MappingSize::Page4KB);
-            self.data_frames.insert(vpn, p_tracker);   
+            self.data_frames.insert(vpn, p_tracker.into());   
         }
     } 
     /* 
diff --git a/os/src/syscall/fs.rs b/os/src/syscall/fs.rs
index 1f9c1270511962739fadfde62e369fe80c88bb12..2d85213d9b08350ea5263301289cf90f918c6111 100644
--- a/os/src/syscall/fs.rs
+++ b/os/src/syscall/fs.rs
@@ -303,7 +303,7 @@ pub fn sys_mmap(
     let start_va = VirtAddr::new(start);
     let end_va = VirtAddr::new(start + len);
 
-    inner.memory_set.push_into_mmaparea(
+    inner.memory_set.lock().push_into_mmaparea(
         MapArea::new(
             start_va,
             end_va,
@@ -361,8 +361,8 @@ pub fn sys_munmap(start: *mut usize, _len: usize) -> SysResult<isize> {
     // release current task TCB manually to avoid multi-borrow
     drop(inner);
     file.write_at(0, &mut buf);
-    let mut inner = task.inner_exclusive_access();
-    let res = inner.memory_set.remove_map_area_by_vpn_start(VirtAddr::new(start as usize).into());
+    let inner = task.inner_exclusive_access();
+    let res = inner.memory_set.lock().remove_map_area_by_vpn_start(VirtAddr::new(start as usize).into());
     if res < 0 {
         // 找不到地址
         return Err(SysError::EFAULT);
diff --git a/os/src/syscall/mod.rs b/os/src/syscall/mod.rs
index f7fd4f96fabb76a0aca2bf86f292ae6b8b47b6dd..7ec0dbc0de60f1aeece32e3f0ff0ef6ede9528d4 100644
--- a/os/src/syscall/mod.rs
+++ b/os/src/syscall/mod.rs
@@ -91,7 +91,7 @@ use config::RLimit;
 use system_result::{SysResult,SysError};
 const MODULE_LEVEL:log::Level = log::Level::Trace;
 use crate::task::check_pending_signals;
-pub use process::lazy_brk;
+pub use process::CloneFlags;
 /// handle syscall exception with `syscall_id` and other arguments
 pub fn syscall(syscall_id: usize, args: [usize; 6]) -> isize {
    // println!("syscallid:{}",syscall_id);
diff --git a/os/src/syscall/process.rs b/os/src/syscall/process.rs
index 84ced290667db3ced2cb2dce31d4b9352b0ee1b8..ac737a12a0a5868beb50becd1125d2864d67f250 100644
--- a/os/src/syscall/process.rs
+++ b/os/src/syscall/process.rs
@@ -2,7 +2,7 @@ use core::f32::consts::E;
 use core::ops::Add;
 
 use crate::fs::{open_file,path_to_dentry,path_to_father_dentry,create_file};
-use crate::mm::{translated_ref, translated_refmut, translated_str, MapType};
+use crate::mm::{frame_alloc, frame_dealloc, translated_ref, translated_refmut, translated_str, MapType};
 use crate::task::{
     self, UNAME,add_task, current_task, current_user_token, 
     exit_current_and_run_next, suspend_current_and_run_next,SignalFlags,pid2task,remove_from_pid2task,
@@ -113,7 +113,7 @@ pub fn sys_clone(flags:usize,stack_ptr:*const u8,ptid:*mut i32,_tls:*mut i32,cti
     }
     let flags = flags.unwrap();
     let current_task = current_task().unwrap();
-    let new_task = current_task.fork();    
+    let new_task = current_task.fork(flags);    
     let new_pid = new_task.pid.0;
     // modify trap context of new_task, because it returns immediately after switching
     let trap_cx = new_task.inner_exclusive_access().get_trap_cx();
@@ -212,7 +212,7 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> SysResult<isize> {
             let exit_code = child.inner_exclusive_access().exit_code;
             // ++++ release child PCB
             if exit_code_ptr != core::ptr::null_mut(){
-                *translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code;
+                *translated_refmut(inner.memory_set.lock().token(), exit_code_ptr) = exit_code;
             }
             return Ok(found_pid as isize);
         } else {
@@ -234,33 +234,7 @@ pub fn sys_chdir(path: *const u8) -> SysResult<isize> {
     task_inner.cwd = dentry;
     Ok(0)
 }
-pub fn lazy_brk(error_addr: usize) -> SysResult<isize> {
-    let task = current_task().unwrap();
-    let mut inner = task.inner_exclusive_access();
-    inner.memory_set.handle_lazy_addr(error_addr)?;
-    /* 
-    if error_addr >= inner.max_data_addr && error_addr < inner.heap_top {
-        let cur_addr = VirtAddr::new(inner.max_data_addr).floor();
-        let new_addr = VirtAddr::new(error_addr).ceil();
-        let page_count = (new_addr.addr() - cur_addr.addr()) / PAGE_SIZE;
-        let alloc_start_addr = inner.max_data_addr;
- 
-        inner.memory_set.push_into_heaparea(
-            MapArea::new(
-                VirtAddr::new(alloc_start_addr),
-                VirtAddr::new(error_addr),
-                MapType::Framed,
-                MapPermission::R | MapPermission::U | MapPermission::W | MapPermission::X,
-            ),
-            None,
-        );
- 
-        inner.max_data_addr += PAGE_SIZE * page_count;
-        return Ok(0);
-    }*/
- 
-    Ok(0)
-}
+
 
 pub fn sys_brk(new_brk:  usize) -> SysResult<isize> {
     let task = current_task().unwrap();
@@ -289,7 +263,7 @@ pub fn sys_brk(new_brk:  usize) -> SysResult<isize> {
         
         let page_count = (new_addr.addr() - cur_addr.addr()) / PAGE_SIZE;
         let alloc_start_addr = task_inner.max_data_addr;
-        task_inner.memory_set.push_into_heaparea_lazy(
+        task_inner.memory_set.lock().push_into_heaparea_lazy(
             MapArea::new(
                 VirtAddr::new(alloc_start_addr), //向下
                 VirtAddr::new(new_brk), //向上
@@ -576,7 +550,7 @@ pub fn sys_mprotect(addr: VirtAddr, len: usize, prot: i32) -> SysResult<isize> {
     }
 
     let task = current_task().unwrap();
-    let mut inner = task.inner_exclusive_access();
+    let inner = task.inner_exclusive_access();
 
    // log_debug!("before mprotect:");
    // inner.memory_set.debug_addr_info();
@@ -617,35 +591,48 @@ pub fn sys_mprotect(addr: VirtAddr, len: usize, prot: i32) -> SysResult<isize> {
     let mut target_area_id = None;
     if !is_find {
    //     log_debug!("check normal");
-        target_area_id = process_area(&mut inner.memory_set.areas, 0, &mut is_find);
+        target_area_id = process_area(&mut inner.memory_set.lock().areas, 0, &mut is_find);
     }
     if !is_find && target_area_id.is_none() {
   //      log_debug!("check mmap");
-        target_area_id = process_area(&mut inner.memory_set.mmap_area, 1, &mut is_find);
+        target_area_id = process_area(&mut inner.memory_set.lock().mmap_area, 1, &mut is_find);
     }
     if !is_find && target_area_id.is_none() {
   //      log_debug!("check heap");
-        target_area_id = process_area(&mut inner.memory_set.heap_area, 2, &mut is_find);
+        target_area_id = process_area(&mut inner.memory_set.lock().heap_area, 2, &mut is_find);
 }
 
     // 新区间插入对应area
     for new_area in new_areas {
         if let Some(area) = new_area {
             for (vpn,frame) in area.data_frames.iter(){
-                inner.memory_set.page_table.map_page(*vpn, frame.ppn, area.map_perm.into(), arch::pagetable::MappingSize::Page4KB);
+                inner.memory_set.lock().page_table.map_page(*vpn, frame.ppn, area.map_perm.into(), arch::pagetable::MappingSize::Page4KB);
             }
             match target_area_id {
-                Some(0) => inner.memory_set.areas.push(area),
-                Some(1) => inner.memory_set.mmap_area.push(area),
-                Some(2) => inner.memory_set.heap_area.push(area),
+                Some(0) => inner.memory_set.lock().areas.push(area),
+                Some(1) => inner.memory_set.lock().mmap_area.push(area),
+                Some(2) => inner.memory_set.lock().heap_area.push(area),
                 _ => panic!("wrong area_id in mprotect call!"),
             }
         }
     }
-    inner.memory_set.activate();
+    inner.memory_set.lock().activate();
   //  log_debug!("after mprotect:");
   //  inner.memory_set.debug_addr_info();
     if is_find {
+        let mut v: usize = start_vpn.value();
+        while v < end_vpn.value() {
+        let vaddr = VirtAddr::new(VirtPage::new(v).to_addr());
+        let memory_set = inner.memory_set.lock();
+        if let Some(paddr) = memory_set.page_table.translate(vaddr) {
+            let ppn = PhysPage::from_addr(paddr.0.addr());
+            let vpn =VirtPage::from_addr(vaddr.addr());
+            memory_set.page_table.map_page(vpn, ppn, perm.into(), arch::pagetable::MappingSize::Page4KB);
+        }
+        
+        v += 1;
+        }
+        inner.memory_set.lock().activate();
         Ok(0)
     } else {
         Err(SysError::ENXIO)
diff --git a/os/src/task/mod.rs b/os/src/task/mod.rs
index b31f5147f9cd7e17becf48011b5bc0b2af201491..020e712454f7ca3f8c13097485f4aea2a1e0f8d4 100644
--- a/os/src/task/mod.rs
+++ b/os/src/task/mod.rs
@@ -108,7 +108,7 @@ pub fn exit_current_and_run_next(exit_code: i32) {
         }
     }
     inner.children.clear();
-    inner.memory_set.recycle_data_pages();
+    inner.memory_set.lock().recycle_data_pages();
     drop(inner);
     drop(task);
     let mut _unused = KContext::blank();
diff --git a/os/src/task/processor.rs b/os/src/task/processor.rs
index 09aeacd7b0e08c8bf6dd99f04fa81a5ee915a58d..d60497338c397ffdfdba53237d3d6e27fc1f5f41 100644
--- a/os/src/task/processor.rs
+++ b/os/src/task/processor.rs
@@ -56,7 +56,7 @@ pub fn run_tasks() {
             let mut task_inner = task.inner_exclusive_access();
             let next_task_cx_ptr = &task_inner.task_cx as *const KContext;
             task_inner.task_status = TaskStatus::Running;
-            let token = task_inner.memory_set.token();
+            let token = task_inner.memory_set.lock().token();
             drop(task_inner);
             // release coming task TCB manually
             processor.current = Some(task);
diff --git a/os/src/task/task.rs b/os/src/task/task.rs
index 390fe94d6d398824360dbc2fe10d55194cf279d0..0b12b898becc43b877acd98523a453d3a4aeaac0 100644
--- a/os/src/task/task.rs
+++ b/os/src/task/task.rs
@@ -24,6 +24,7 @@ use core::mem::size_of;
 use crate::task::SignalFlags;
 use crate::task::signal::SigAction;
 use crate::task::action::SignalActions;
+use crate::syscall::CloneFlags;
 //use user_lib::{USER_HEAP_SIZE};
 
 const MODULE_LEVEL:log::Level = log::Level::Trace;
@@ -116,7 +117,7 @@ pub struct TaskControlBlockInner {
     pub base_size: usize,
     pub task_cx:KContext,
     pub task_status: TaskStatus,
-    pub memory_set: MemorySet,
+    pub memory_set: Arc<Mutex<MemorySet>>,
     pub kernel_stack: KernelStack,
     pub parent: Option<Weak<TaskControlBlock>>,
     pub children: Vec<Arc<TaskControlBlock>>,//why use Arc:TaskManager->TCB & TCB.children->TCB & TaskManager creates Arc<TCB>
@@ -171,7 +172,7 @@ impl TaskControlBlockInner {
         unsafe { paddr.as_mut().unwrap() }
     }
     pub fn get_user_token(&self) -> PageTable  {
-        self.memory_set.token()
+        self.memory_set.lock().token()
     }
     fn get_status(&self) -> TaskStatus {
         self.task_status
@@ -202,7 +203,7 @@ impl TaskControlBlock {
                     base_size: user_sp,
                     task_cx: blank_kcontext(kstack.get_position().1),
                     task_status: TaskStatus::Ready,
-                    memory_set,
+                    memory_set:Arc::new(Mutex::new(memory_set)),
                     parent: None,
                     children: Vec::new(),
                     exit_code: 0,
@@ -353,7 +354,7 @@ impl TaskControlBlock {
         // **** access current TCB exclusively
         let mut inner = self.inner_exclusive_access();
         // substitute memory_set
-        inner.memory_set = memory_set;
+        inner.memory_set = Arc::new(Mutex::new(memory_set));
         // update trap_cx ppn
         // FIXME: This is a temporary solution
         inner.trap_cx = TrapFrame::new();
@@ -369,11 +370,17 @@ impl TaskControlBlock {
         // **** release current PCB
     }
     ///
-    pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
+    pub fn fork(self: &Arc<TaskControlBlock>, flags: CloneFlags) -> Arc<TaskControlBlock> {
         // ---- hold parent PCB lock
         let mut parent_inner = self.inner_exclusive_access();
         // copy user space(include trap context)
-        let memory_set = MemorySet::from_existed_user(&parent_inner.memory_set);
+        let memory_set;
+        if flags.contains(CloneFlags::VM) {
+            memory_set = parent_inner.memory_set.clone();
+        }
+        else {
+            memory_set = Arc::new(Mutex::new(MemorySet::from_existed_user(&parent_inner.memory_set.lock())));
+        }
         // alloc a pid and a kernel stack in kernel space
         let pid_handle = pid_alloc();
         let kstack = KernelStack::new();