The Evolution of Java IO
Generation 1: BIO
package com.docker.play.io;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.ServerSocket;
import java.net.Socket;

/**
 * Blocking IO: one thread per connection.
 */
public class SocketBIO2 {

    public static void main(String[] args) throws Exception {
        ServerSocket serverSocket = new ServerSocket(8888);
        System.out.println("Waiting for client connections......");
        while (true) {
            Socket client = serverSocket.accept(); // blocking point 1
            new Thread(new ClientMessageHandler(client)).start();
        }
    }

    private static class ClientMessageHandler implements Runnable {

        private Socket client;

        public ClientMessageHandler(Socket client) {
            this.client = client;
        }

        @Override
        public void run() {
            try {
                System.out.println(client.getInetAddress() + " client connected");
                InputStream inputStream = client.getInputStream();
                BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
                String line;
                while ((line = bufferedReader.readLine()) != null) { // blocking point 2
                    System.out.println("Client data: " + line);
                }
            } catch (Exception e) {
                // TODO
            } finally {
                try {
                    if (client != null) {
                        client.close();
                    }
                } catch (IOException e1) {
                    e1.printStackTrace();
                }
            }
        }
    }
}
BIO is blocking IO. After the server starts, accept() blocks while waiting for a client to connect, and once a connection is established the server blocks again in read() while waiting for client data, so the flow blocks at these two points. The code above uses a thread per connection so that a slow client only stalls its own handler thread rather than the whole server. However, with a large number of connections this thread-per-connection model exhausts the server's thread resources, so it is unsuitable for high-concurrency scenarios. In addition, every accept() and read() issued in those loops is a call into the kernel, which adds overhead of its own.
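To exercise any of the servers in this article (they all listen on port 8888), a minimal blocking client is enough. The sketch below is my illustration, not part of the original article; the class name and the test message are made up.

package com.docker.play.io;

import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

// Hypothetical test client, for illustration only.
public class SimpleClient {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket("127.0.0.1", 8888);
             OutputStream out = socket.getOutputStream()) {
            // The BIO server reads lines, so terminate the message with '\n'.
            out.write("hello from SimpleClient\n".getBytes(StandardCharsets.UTF_8));
            out.flush();
            Thread.sleep(1000); // keep the connection open briefly so the server can read
        }
    }
}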
Generation 2: NIO
package com.docker.play.io;

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.LinkedList;

/**
 * Non-blocking IO.
 * - the C10K problem
 * - empty (busy) polling
 * - frequent accept and read system calls
 */
public class SocketNIO1 {

    public static void main(String[] args) throws Exception {
        LinkedList<SocketChannel> clients = new LinkedList<SocketChannel>();
        ServerSocketChannel socketChannel = ServerSocketChannel.open();
        socketChannel.bind(new InetSocketAddress(8888));
        socketChannel.configureBlocking(Boolean.FALSE);

        ByteBuffer buffer = ByteBuffer.allocateDirect(1024); // the buffer can live on the heap or off-heap (direct)

        while (true) {
            SocketChannel client = socketChannel.accept(); // no longer blocks; returns null when nobody is connecting
            if (client != null) {
                client.configureBlocking(Boolean.FALSE);
                System.out.println(client.socket().getInetAddress() + " client connected");
                clients.add(client);
            } else {
                System.out.println("null......"); // printed on every idle pass: this is the busy-polling problem
            }

            Iterator<SocketChannel> iterator = clients.iterator();
            while (iterator.hasNext()) {
                SocketChannel curClient = iterator.next();
                int num = curClient.read(buffer);
                if (num > 0) {
                    buffer.flip();                               // switch the buffer from write mode to read mode
                    byte[] byteData = new byte[buffer.limit()];  // array sized to the bytes actually read
                    buffer.get(byteData);                        // copy the buffer contents into the array
                    String dataStr = new String(byteData);
                    System.out.println(curClient.socket().getInetAddress() + " client data: " + dataStr);
                    buffer.clear();
                } else if (num < 0) {                            // the client closed the connection
                    curClient.close();
                    iterator.remove();
                }
            }
        }
    }
}
NIO (New IO), commonly read as non-blocking IO, blocks in neither the accept phase (waiting for connections) nor the read phase (reading client data) on the server side. The loop body, however, still calls the accept and read system calls over and over: when no client is connecting, or no client has sent data, this degenerates into empty busy polling. Under high concurrency that wastes a great deal of CPU on pointless user-to-kernel transitions.
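The following is a tiny self-contained sketch (my illustration, not from the original article; the class name and port 9999 are made up) of what configureBlocking(false) actually changes: accept() returns immediately, yielding null when no client is waiting, instead of parking the thread the way ServerSocket.accept() does.

package com.docker.play.io;

import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

// Hypothetical demo of a non-blocking accept, for illustration only.
public class NonBlockingAcceptDemo {
    public static void main(String[] args) throws Exception {
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(9999));
        server.configureBlocking(false);
        long start = System.nanoTime();
        SocketChannel client = server.accept(); // returns at once; null because no client is waiting
        long micros = (System.nanoTime() - start) / 1_000;
        System.out.println("accept() returned " + client + " after " + micros + " microseconds");
        server.close();
    }
}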
Generation 3: the Selector multiplexer
package com.docker.play.io;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.Set;

public class SocketSelector1 {

    private ServerSocketChannel server;
    private Selector selector;
    private int port = 8888;

    public void initServer() {
        try {
            server = ServerSocketChannel.open();
            server.configureBlocking(Boolean.FALSE);
            server.bind(new InetSocketAddress(port));
            selector = Selector.open();
            server.register(selector, SelectionKey.OP_ACCEPT);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public void start() {
        initServer();
        System.out.println("Server started......");
        try {
            while (true) {
                while (selector.select(0) > 0) { // ask the kernel whether any registered channel has events
                    Set<SelectionKey> selectionKeys = selector.selectedKeys(); // take the ready keys out of the multiplexer
                    Iterator<SelectionKey> iterator = selectionKeys.iterator();
                    while (iterator.hasNext()) {
                        SelectionKey selectionKey = iterator.next();
                        iterator.remove();
                        if (selectionKey.isAcceptable()) {        // a new connection is ready to be accepted
                            acceptHandler(selectionKey);
                        } else if (selectionKey.isReadable()) {   // a client has data ready to be read
                            readHandler(selectionKey);
                        }
                    }
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public void acceptHandler(SelectionKey selectionKey) throws IOException {
        ServerSocketChannel ssc = (ServerSocketChannel) selectionKey.channel();
        SocketChannel client = ssc.accept();
        client.configureBlocking(Boolean.FALSE);
        ByteBuffer buffer = ByteBuffer.allocate(8091);
        client.register(selector, SelectionKey.OP_READ, buffer);
        System.out.println("--------------------------------------------");
        System.out.println("New client: " + client.getRemoteAddress());
        System.out.println("--------------------------------------------");
    }

    public void readHandler(SelectionKey selectionKey) throws IOException {
        SocketChannel client = (SocketChannel) selectionKey.channel();   // get the channel back from the key
        ByteBuffer byteBuffer = (ByteBuffer) selectionKey.attachment();  // get the per-channel buffer attached at registration
        byteBuffer.clear();
        int read = 0;
        while (true) {
            read = client.read(byteBuffer);
            if (read > 0) {
                byteBuffer.flip();
                while (byteBuffer.hasRemaining()) {
                    client.write(byteBuffer); // echo the data back to the client
                }
                byteBuffer.clear();
            } else if (read == 0) {
                break;
            } else {
                client.close();
                break;
            }
        }
    }

    public static void main(String[] args) {
        new SocketSelector1().start();
    }
}
The Selector multiplexer is the component that works together with NIO to eliminate the user-space busy loop of calling accept and read for every channel; under the hood the JDK builds it on the operating system's multiplexing facilities such as select, poll, and epoll. The Selector is event-driven: accepting a connection and reading data are modelled as the OP_ACCEPT and OP_READ events. Instead of user space probing the kernel channel by channel, a single select() call hands the kernel the whole set of registered channels and blocks until at least one of them is ready, so empty polling and per-channel system calls disappear and efficiency improves greatly, which is what makes this model suitable for high-concurrency scenarios. For efficient data transfer the API also introduces the Channel (a bidirectional transport pipe) and the Buffer (a data container): client and server exchange data through a channel, and each channel is given a buffer in which that data is staged for reading and writing. The API does not force a one-to-one pairing of channels and buffers, but keeping one buffer per channel (for example as the attachment in the code above) keeps the data handling much simpler.
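Since every read path above relies on ByteBuffer's position/limit bookkeeping, here is a small self-contained sketch (my illustration, not from the original article) of the write, flip, get, clear cycle that the servers use.

package com.docker.play.io;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Minimal demonstration of the ByteBuffer read/write cycle.
public class BufferFlipDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);
        buffer.put("hello".getBytes(StandardCharsets.UTF_8)); // write mode: position advances past the data
        buffer.flip();                                         // read mode: limit = old position, position = 0
        byte[] data = new byte[buffer.limit()];                // sized to exactly the bytes written
        buffer.get(data);                                      // drain the buffer into the array
        System.out.println(new String(data, StandardCharsets.UTF_8)); // prints "hello"
        buffer.clear();                                        // ready for the next write
    }
}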
Generation 4: Netty
package com.docker.play.io;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

/**
 * IO is synchronous, processing is asynchronous.
 */
public class NettyIO {

    public static void main(String[] args) throws Exception {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup(2);
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) throws Exception {
                            ChannelPipeline p = ch.pipeline();
                            p.addLast(new EchoServerHandler());
                        }
                    });

            // Start the server.
            ChannelFuture f = b.bind(8888).sync();
            System.out.println("EchoServer.main ServerBootstrap configured and started");

            // Wait until the server socket is closed.
            f.channel().closeFuture().sync();
            System.out.println("EchoServer.main end");
        } finally {
            // Shut down all event loops to terminate all threads.
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}

class EchoServerHandler extends ChannelInboundHandlerAdapter {

    // Called for every inbound message.
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        ctx.write(msg); // queue the message to be echoed back
    }

    // Called when the current read burst is complete.
    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) {
        System.out.println("EchoServerHandler.channelReadComplete");
        ctx.flush();
    }

    // Called when an exception is raised.
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        // Close the connection when an exception is raised.
        cause.printStackTrace();
        ctx.close();
    }
}
Programming directly against the raw NIO + Selector API is verbose, error-prone, and hard to make both correct and fast. To lower that barrier the community built Netty, an open-source NIO framework that makes IO programming dramatically simpler. In Netty the reading of IO data is synchronous (event-loop threads pull data off the channels), while the processing of that data is dispatched asynchronously through the pipeline, which is a large part of why Netty is so efficient.
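For completeness, here is a minimal Netty client sketch that connects to the echo server above, sends one message, and prints the echo. It is my illustration, not part of the original article; the class name and message text are made up.

package com.docker.play.io;

import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.CharsetUtil;

// Hypothetical echo client, for illustration only.
public class NettyEchoClient {
    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group)
             .channel(NioSocketChannel.class)
             .handler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 public void initChannel(SocketChannel ch) {
                     ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                         @Override
                         public void channelActive(ChannelHandlerContext ctx) {
                             // Send one message as soon as the connection is established.
                             ctx.writeAndFlush(Unpooled.copiedBuffer("hello netty", CharsetUtil.UTF_8));
                         }

                         @Override
                         public void channelRead(ChannelHandlerContext ctx, Object msg) {
                             ByteBuf buf = (ByteBuf) msg;
                             System.out.println("echoed back: " + buf.toString(CharsetUtil.UTF_8));
                             buf.release();
                             ctx.close(); // one round trip is enough for the demo
                         }
                     });
                 }
             });
            b.connect("127.0.0.1", 8888).sync().channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}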
PS: Synchronous IO means the user-space program itself must go to the kernel to fetch the data; all four generations above are synchronous IO. Asynchronous IO means the user-space program does not fetch the data itself (the kernel delivers it and signals when the operation has completed); it is more complex and requires support from the operating system.
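The article stops at synchronous IO, but for readers who want to see what the asynchronous model looks like in Java, a rough sketch using JDK 7's NIO.2 AsynchronousServerSocketChannel follows. It is my illustration, not part of the original article (class name and port are made up): the read is handed to the kernel together with a CompletionHandler, and the callback fires only after the data has already been placed into the buffer.

package com.docker.play.io;

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousServerSocketChannel;
import java.nio.channels.AsynchronousSocketChannel;
import java.nio.channels.CompletionHandler;
import java.nio.charset.StandardCharsets;

// Hypothetical AIO server sketch, for illustration only.
public class SocketAIODemo {
    public static void main(String[] args) throws Exception {
        final AsynchronousServerSocketChannel server =
                AsynchronousServerSocketChannel.open().bind(new InetSocketAddress(8889));
        server.accept(null, new CompletionHandler<AsynchronousSocketChannel, Void>() {
            @Override
            public void completed(AsynchronousSocketChannel client, Void att) {
                server.accept(null, this); // keep accepting further connections
                ByteBuffer buffer = ByteBuffer.allocate(1024);
                client.read(buffer, buffer, new CompletionHandler<Integer, ByteBuffer>() {
                    @Override
                    public void completed(Integer bytesRead, ByteBuffer buf) {
                        buf.flip(); // the kernel has already filled the buffer by the time this runs
                        System.out.println("AIO received: " + StandardCharsets.UTF_8.decode(buf));
                    }

                    @Override
                    public void failed(Throwable exc, ByteBuffer buf) {
                        exc.printStackTrace();
                    }
                });
            }

            @Override
            public void failed(Throwable exc, Void att) {
                exc.printStackTrace();
            }
        });
        Thread.currentThread().join(); // handlers run on the channel group's threads; keep main alive
    }
}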